2024-11-11 04:23:27,811 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6
2024-11-11 04:23:27,829 main DEBUG Took 0.015908 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-11 04:23:27,830 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-11 04:23:27,830 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-11 04:23:27,831 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-11 04:23:27,832 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 04:23:27,839 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-11 04:23:27,850 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 04:23:27,851 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 04:23:27,852 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 04:23:27,852 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 04:23:27,853 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 04:23:27,853 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 04:23:27,854 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 04:23:27,854 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 04:23:27,855 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 04:23:27,855 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 04:23:27,856 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 04:23:27,856 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 04:23:27,857 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 04:23:27,857 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 04:23:27,858 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 04:23:27,858 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 04:23:27,858 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 04:23:27,859 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 04:23:27,859 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 04:23:27,859 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 04:23:27,860 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 04:23:27,860 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 04:23:27,861 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 04:23:27,861 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 04:23:27,861 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 04:23:27,861 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-11 04:23:27,863 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 04:23:27,864 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-11 04:23:27,866 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-11 04:23:27,866 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-11 04:23:27,867 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-11 04:23:27,868 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-11 04:23:27,875 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-11 04:23:27,878 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-11 04:23:27,879 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-11 04:23:27,880 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-11 04:23:27,880 main DEBUG createAppenders(={Console})
2024-11-11 04:23:27,881 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6 initialized
2024-11-11 04:23:27,881 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6
2024-11-11 04:23:27,881 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6 OK.
2024-11-11 04:23:27,882 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-11 04:23:27,882 main DEBUG OutputStream closed
2024-11-11 04:23:27,882 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-11 04:23:27,882 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-11 04:23:27,883 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@25fb8912 OK
2024-11-11 04:23:27,951 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-11 04:23:27,953 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-11 04:23:27,954 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-11 04:23:27,955 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-11 04:23:27,956 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-11 04:23:27,956 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-11 04:23:27,957 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-11 04:23:27,957 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-11 04:23:27,957 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-11 04:23:27,957 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-11 04:23:27,958 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-11 04:23:27,958 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-11 04:23:27,958 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-11 04:23:27,959 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-11 04:23:27,959 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-11 04:23:27,959 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-11 04:23:27,959 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-11 04:23:27,960 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-11 04:23:27,962 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-11 04:23:27,963 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@64a40280) with optional ClassLoader: null
2024-11-11 04:23:27,963 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-11 04:23:27,964 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@64a40280] started OK.
2024-11-11T04:23:27,976 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.master.balancer.TestBalancerDecision timeout: 13 mins
2024-11-11 04:23:27,978 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-11 04:23:27,979 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-11T04:23:28,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-11-11T04:23:28,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
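The "Loaded config" record above is where the stochastic balancer reports the tuning knobs it picked up (maxSteps, runMaxSteps, stepsPerRegion, maxRunningTime, isByTable) together with its cost functions. As a rough illustration of how such knobs are usually supplied, the sketch below sets the commonly documented hbase.master.balancer.stochastic.* properties on an HBase Configuration; the property names and values are assumptions for illustration, not taken from this run, and should be checked against the HBase version in use.

// Illustrative sketch only: how the knobs reported in the "Loaded config" record
// are typically supplied through hbase-site.xml / Configuration properties.
// Property names are the commonly documented ones and are an assumption here.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static Configuration tunedBalancerConf() {
    Configuration conf = HBaseConfiguration.create();
    // Upper bound on candidate moves the stochastic search may try per run.
    conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1000000);
    // Steps attempted per region; total work scales with cluster size.
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    // Wall-clock budget (milliseconds) for one balancer run.
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30000L);
    // Threshold the weighted average imbalance is compared against; the
    // "skipping load balancing" records below suggest lowering it for more
    // aggressive balancing (example value, not from this log).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Whether the balancer produces per-table plans (isByTable in the log).
    conf.setBoolean("hbase.master.loadbalance.bytable", true);
    return conf;
  }
}

hbase.master.balancer.stochastic.minCostNeedBalance is the same property the "skipping load balancing" records that follow recommend adjusting.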
2024-11-11T04:23:28,635 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: master.balancer.TestBalancerDecision#testBalancerDecisions Thread=12, OpenFileDescriptor=286, MaxFileDescriptor=1048576, SystemLoadAverage=182, ProcessCount=11, AvailableMemoryMB=5651
2024-11-11T04:23:28,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-11-11T04:23:28,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-11-11T04:23:28,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=-1.0
2024-11-11T04:23:28,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=true, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-11-11T04:23:28,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0
2024-11-11T04:23:28,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5
2024-11-11T04:23:28,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4
2024-11-11T04:23:28,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3
2024-11-11T04:23:28,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2
2024-11-11T04:23:28,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1
2024-11-11T04:23:28,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0
2024-11-11T04:23:28,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9
2024-11-11T04:23:28,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8
2024-11-11T04:23:28,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7
2024-11-11T04:23:28,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6
2024-11-11T04:23:28,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1
2024-11-11T04:23:28,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2015397649=0, srv2092102296=1} racks are {rack=0}
2024-11-11T04:23:28,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-11-11T04:23:28,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1
2024-11-11T04:23:28,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-11-11T04:23:28,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-11-11T04:23:28,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1
2024-11-11T04:23:28,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:28,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0);
2024-11-11T04:23:28,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0
2024-11-11T04:23:28,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2015397649=0, srv2092102296=1} racks are {rack=0}
2024-11-11T04:23:28,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-11-11T04:23:28,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1
2024-11-11T04:23:28,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-11-11T04:23:28,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-11-11T04:23:28,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1
2024-11-11T04:23:28,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:28,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0);
2024-11-11T04:23:28,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1
2024-11-11T04:23:28,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv529008050=1, srv1918564513=0} racks are {rack=0}
2024-11-11T04:23:28,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-11-11T04:23:28,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1
2024-11-11T04:23:28,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-11-11T04:23:28,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-11-11T04:23:28,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1
2024-11-11T04:23:28,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:28,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:28,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv529008050=1, srv1918564513=0} racks are {rack=0} 2024-11-11T04:23:28,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:28,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv206214587=1, srv1971107148=0} racks are {rack=0} 2024-11-11T04:23:28,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:28,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv206214587=1, srv1971107148=0} racks are {rack=0} 2024-11-11T04:23:28,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:28,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv479314905=1, srv45193953=0} racks are {rack=0} 2024-11-11T04:23:28,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:28,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv479314905=1, srv45193953=0} racks are {rack=0} 2024-11-11T04:23:28,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:28,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv479314905=1, srv45193953=0} racks are {rack=0} 2024-11-11T04:23:28,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:28,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1108263876=0, srv480941456=1} racks are {rack=0} 2024-11-11T04:23:28,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:28,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1108263876=0, srv480941456=1} racks are {rack=0} 2024-11-11T04:23:28,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:28,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1108263876=0, srv480941456=1} racks are {rack=0} 2024-11-11T04:23:28,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:28,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1108263876=0, srv480941456=1} racks are {rack=0} 2024-11-11T04:23:28,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:28,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1946614376=0, srv543253598=1} racks are {rack=0} 2024-11-11T04:23:28,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:28,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv432814322=1, srv1028481184=0} racks are {rack=0} 2024-11-11T04:23:28,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:28,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1858098413=0, srv55286039=1} racks are {rack=0} 2024-11-11T04:23:28,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:28,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1858098413=0, srv55286039=1} racks are {rack=0} 2024-11-11T04:23:28,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:28,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1858098413=0, srv55286039=1} racks are {rack=0} 2024-11-11T04:23:28,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:28,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1858098413=0, srv55286039=1} racks are {rack=0} 2024-11-11T04:23:28,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:28,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1858098413=0, srv55286039=1} racks are {rack=0} 2024-11-11T04:23:28,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:28,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1858098413=0, srv55286039=1} racks are {rack=0} 2024-11-11T04:23:28,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:28,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1858098413=0, srv55286039=1} racks are {rack=0} 2024-11-11T04:23:28,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:28,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1858098413=0, srv55286039=1} racks are {rack=0} 2024-11-11T04:23:28,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:28,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1858098413=0, srv55286039=1} racks are {rack=0} 2024-11-11T04:23:28,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:28,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1858098413=0, srv55286039=1} racks are {rack=0} 2024-11-11T04:23:28,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
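Every cost function in these records reports imbalance=0.0, so a weighted average of the imbalances is 0.0 no matter how the multipliers are chosen, which is why each table stays under the 1.0 threshold and is skipped. A small plain-Java arithmetic check follows; it is not HBase code, and the balancer's exact combination formula may differ, but a weighted mean of all-zero terms is zero either way.

    // Plain-Java check of the "weighted average imbalance" reported above.
    public class WeightedImbalance {
      static double weightedAverage(double[] multipliers, double[] imbalances) {
        double num = 0.0, den = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
          num += multipliers[i] * imbalances[i];
          den += multipliers[i];
        }
        return den == 0.0 ? 0.0 : num / den;
      }

      public static void main(String[] args) {
        double[] m = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0}; // multipliers from the log
        double[] x = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};     // every imbalance is 0.0
        System.out.println(weightedAverage(m, x)); // prints 0.0, below the 1.0 threshold
      }
    }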
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1380 2024-11-11T04:23:28,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1381 2024-11-11T04:23:28,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table880 2024-11-11T04:23:28,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table880) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1140 2024-11-11T04:23:28,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1382 2024-11-11T04:23:28,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table881 2024-11-11T04:23:28,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table881) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1141 2024-11-11T04:23:28,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1383 2024-11-11T04:23:28,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table640 2024-11-11T04:23:28,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table640) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table882 2024-11-11T04:23:28,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table882) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1142 2024-11-11T04:23:28,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1384 2024-11-11T04:23:28,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table641 2024-11-11T04:23:28,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table641) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table883 2024-11-11T04:23:28,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table883) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1143 2024-11-11T04:23:28,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1385 2024-11-11T04:23:28,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table400 2024-11-11T04:23:28,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table642 2024-11-11T04:23:28,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table642) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table884 2024-11-11T04:23:28,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table884) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1144 2024-11-11T04:23:28,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1386 2024-11-11T04:23:28,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table401 2024-11-11T04:23:28,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table643 2024-11-11T04:23:28,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table643) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table885 2024-11-11T04:23:28,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table885) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1145 2024-11-11T04:23:28,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1387 2024-11-11T04:23:28,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table402 2024-11-11T04:23:28,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table644 2024-11-11T04:23:28,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table644) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table886 2024-11-11T04:23:28,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table886) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table403 2024-11-11T04:23:28,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table645 2024-11-11T04:23:28,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table645) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table887 2024-11-11T04:23:28,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table887) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table404 2024-11-11T04:23:28,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table646 2024-11-11T04:23:28,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table646) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table888 2024-11-11T04:23:28,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table888) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table405 2024-11-11T04:23:28,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table647 2024-11-11T04:23:28,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table647) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table889 2024-11-11T04:23:28,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table889) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table406 2024-11-11T04:23:28,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table648 2024-11-11T04:23:28,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table648) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table407 2024-11-11T04:23:28,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table649 2024-11-11T04:23:28,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table649) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table408 2024-11-11T04:23:28,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table409 2024-11-11T04:23:28,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1146 2024-11-11T04:23:28,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1388 2024-11-11T04:23:28,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1147 2024-11-11T04:23:28,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1389 2024-11-11T04:23:28,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1148 2024-11-11T04:23:28,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1149 2024-11-11T04:23:28,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1370 2024-11-11T04:23:28,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1371 2024-11-11T04:23:28,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table870 2024-11-11T04:23:28,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table870) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1130 2024-11-11T04:23:28,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1372 2024-11-11T04:23:28,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table871 2024-11-11T04:23:28,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table871) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1131 2024-11-11T04:23:28,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1373 2024-11-11T04:23:28,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table630 2024-11-11T04:23:28,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table630) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table872 2024-11-11T04:23:28,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table872) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1132 2024-11-11T04:23:28,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1374 2024-11-11T04:23:28,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table631 2024-11-11T04:23:28,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table631) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table873 2024-11-11T04:23:28,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table873) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1133 2024-11-11T04:23:28,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1375 2024-11-11T04:23:28,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table632 2024-11-11T04:23:28,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table632) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table874 2024-11-11T04:23:28,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table874) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1134 2024-11-11T04:23:28,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1376 2024-11-11T04:23:28,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table633 2024-11-11T04:23:28,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table633) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table875 2024-11-11T04:23:28,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table875) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table634 2024-11-11T04:23:28,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table634) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table876 2024-11-11T04:23:28,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table876) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table635 2024-11-11T04:23:28,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table635) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table877 2024-11-11T04:23:28,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table877) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table636 2024-11-11T04:23:28,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table636) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table878 2024-11-11T04:23:28,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table878) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table637 2024-11-11T04:23:28,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table637) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table879 2024-11-11T04:23:28,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table879) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table638 2024-11-11T04:23:28,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table638) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table639 2024-11-11T04:23:28,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table639) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1135 2024-11-11T04:23:28,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1377 2024-11-11T04:23:28,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1136 2024-11-11T04:23:28,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1378 2024-11-11T04:23:28,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1137 2024-11-11T04:23:28,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1379 2024-11-11T04:23:28,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1138 2024-11-11T04:23:28,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1139 2024-11-11T04:23:28,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table660 2024-11-11T04:23:28,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table660) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1360 2024-11-11T04:23:28,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table661 2024-11-11T04:23:28,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table661) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1361 2024-11-11T04:23:28,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table420 2024-11-11T04:23:28,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table662 2024-11-11T04:23:28,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table662) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1120 2024-11-11T04:23:28,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1362 2024-11-11T04:23:28,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table421 2024-11-11T04:23:28,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table663 2024-11-11T04:23:28,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table663) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1121 2024-11-11T04:23:28,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1363 2024-11-11T04:23:28,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table422 2024-11-11T04:23:28,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table664 2024-11-11T04:23:28,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table664) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1122 2024-11-11T04:23:28,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1364 2024-11-11T04:23:28,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table423 2024-11-11T04:23:28,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table665 2024-11-11T04:23:28,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table665) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1123 2024-11-11T04:23:28,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1365 2024-11-11T04:23:28,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table424 2024-11-11T04:23:28,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table666 2024-11-11T04:23:28,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table666) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table425 2024-11-11T04:23:28,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table667 2024-11-11T04:23:28,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table667) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table426 2024-11-11T04:23:28,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table668 2024-11-11T04:23:28,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table668) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table427 2024-11-11T04:23:28,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table669 2024-11-11T04:23:28,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table669) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table428 2024-11-11T04:23:28,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table429 2024-11-11T04:23:28,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1124 2024-11-11T04:23:28,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1366 2024-11-11T04:23:28,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1125 2024-11-11T04:23:28,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1367 2024-11-11T04:23:28,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1126 2024-11-11T04:23:28,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1368 2024-11-11T04:23:28,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1127 2024-11-11T04:23:28,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1369 2024-11-11T04:23:28,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1128 2024-11-11T04:23:28,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1129 2024-11-11T04:23:28,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table890 2024-11-11T04:23:28,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table890) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table891 2024-11-11T04:23:28,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table891) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table650 2024-11-11T04:23:28,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table650) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table892 2024-11-11T04:23:28,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table892) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1350 2024-11-11T04:23:28,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table651 2024-11-11T04:23:28,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table651) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table893 2024-11-11T04:23:28,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table893) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1351 2024-11-11T04:23:28,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table410 2024-11-11T04:23:28,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table652 2024-11-11T04:23:28,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table652) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table894 2024-11-11T04:23:28,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table894) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1110 2024-11-11T04:23:28,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1352 2024-11-11T04:23:28,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table411 2024-11-11T04:23:28,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table653 2024-11-11T04:23:28,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table653) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table895 2024-11-11T04:23:28,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table895) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1111 2024-11-11T04:23:28,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1353 2024-11-11T04:23:28,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table412 2024-11-11T04:23:28,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table654 2024-11-11T04:23:28,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table654) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table896 2024-11-11T04:23:28,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table896) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1112 2024-11-11T04:23:28,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1354 2024-11-11T04:23:28,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table413 2024-11-11T04:23:28,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table655 2024-11-11T04:23:28,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table655) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table897 2024-11-11T04:23:28,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table897) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table414 2024-11-11T04:23:28,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table656 2024-11-11T04:23:28,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table656) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table898 2024-11-11T04:23:28,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:28,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:28,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:28,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:28,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:28,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:28,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:28,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table898) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:28,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table415 2024-11-11T04:23:28,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table657 2024-11-11T04:23:29,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table657) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table899 2024-11-11T04:23:29,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table899) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table416 2024-11-11T04:23:29,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table658 2024-11-11T04:23:29,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table658) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table417 2024-11-11T04:23:29,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table659 2024-11-11T04:23:29,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table659) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table418 2024-11-11T04:23:29,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table419 2024-11-11T04:23:29,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1113 2024-11-11T04:23:29,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1355 2024-11-11T04:23:29,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1114 2024-11-11T04:23:29,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1356 2024-11-11T04:23:29,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1115 2024-11-11T04:23:29,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1357 2024-11-11T04:23:29,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1116 2024-11-11T04:23:29,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1358 2024-11-11T04:23:29,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1117 2024-11-11T04:23:29,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1359 2024-11-11T04:23:29,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1118 2024-11-11T04:23:29,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1119 2024-11-11T04:23:29,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1182 2024-11-11T04:23:29,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1183 2024-11-11T04:23:29,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1184 2024-11-11T04:23:29,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1185 2024-11-11T04:23:29,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1186 2024-11-11T04:23:29,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1187 2024-11-11T04:23:29,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table840 2024-11-11T04:23:29,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table840) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1188 2024-11-11T04:23:29,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table841 2024-11-11T04:23:29,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table841) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1189 2024-11-11T04:23:29,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table600 2024-11-11T04:23:29,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table600) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table842 2024-11-11T04:23:29,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table842) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table601 2024-11-11T04:23:29,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table601) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table843 2024-11-11T04:23:29,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table843) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table602 2024-11-11T04:23:29,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table602) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table844 2024-11-11T04:23:29,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table844) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table603 2024-11-11T04:23:29,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table603) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table845 2024-11-11T04:23:29,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table845) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table604 2024-11-11T04:23:29,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table604) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table846 2024-11-11T04:23:29,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table846) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table605 2024-11-11T04:23:29,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table605) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table847 2024-11-11T04:23:29,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table847) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table606 2024-11-11T04:23:29,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table606) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table848 2024-11-11T04:23:29,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table848) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1180 2024-11-11T04:23:29,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table607 2024-11-11T04:23:29,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table607) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table849 2024-11-11T04:23:29,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table849) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1181 2024-11-11T04:23:29,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table608 2024-11-11T04:23:29,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table608) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table609 2024-11-11T04:23:29,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table609) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1171 2024-11-11T04:23:29,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1172 2024-11-11T04:23:29,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1173 2024-11-11T04:23:29,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1174 2024-11-11T04:23:29,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1175 2024-11-11T04:23:29,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1176 2024-11-11T04:23:29,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1177 2024-11-11T04:23:29,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table830 2024-11-11T04:23:29,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table830) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1178 2024-11-11T04:23:29,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table831 2024-11-11T04:23:29,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table831) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table832 2024-11-11T04:23:29,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table832) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table833 2024-11-11T04:23:29,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table833) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table834 2024-11-11T04:23:29,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table834) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table835 2024-11-11T04:23:29,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table835) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table836 2024-11-11T04:23:29,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table836) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table837 2024-11-11T04:23:29,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table837) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table838 2024-11-11T04:23:29,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table838) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1170 2024-11-11T04:23:29,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table839 2024-11-11T04:23:29,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table839) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1179 2024-11-11T04:23:29,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1160 2024-11-11T04:23:29,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1161 2024-11-11T04:23:29,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1162 2024-11-11T04:23:29,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1163 2024-11-11T04:23:29,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table860 2024-11-11T04:23:29,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table860) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1164 2024-11-11T04:23:29,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table861 2024-11-11T04:23:29,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table861) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1165 2024-11-11T04:23:29,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table620 2024-11-11T04:23:29,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table620) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table862 2024-11-11T04:23:29,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table862) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1166 2024-11-11T04:23:29,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table621 2024-11-11T04:23:29,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table621) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table863 2024-11-11T04:23:29,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table863) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1167 2024-11-11T04:23:29,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table622 2024-11-11T04:23:29,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table622) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table864 2024-11-11T04:23:29,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table864) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table623 2024-11-11T04:23:29,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table623) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table865 2024-11-11T04:23:29,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table865) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table624 2024-11-11T04:23:29,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table624) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table866 2024-11-11T04:23:29,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table866) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table625 2024-11-11T04:23:29,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table625) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table867 2024-11-11T04:23:29,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table867) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table626 2024-11-11T04:23:29,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table626) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table868 2024-11-11T04:23:29,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table868) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table627 2024-11-11T04:23:29,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table627) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table869 2024-11-11T04:23:29,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table869) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table628 2024-11-11T04:23:29,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table628) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table629 2024-11-11T04:23:29,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table629) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1168 2024-11-11T04:23:29,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1169 2024-11-11T04:23:29,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1391 2024-11-11T04:23:29,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1150 2024-11-11T04:23:29,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1392 2024-11-11T04:23:29,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1151 2024-11-11T04:23:29,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1393 2024-11-11T04:23:29,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1152 2024-11-11T04:23:29,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1394 2024-11-11T04:23:29,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1153 2024-11-11T04:23:29,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1395 2024-11-11T04:23:29,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table850 2024-11-11T04:23:29,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table850) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1154 2024-11-11T04:23:29,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1396 2024-11-11T04:23:29,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table851 2024-11-11T04:23:29,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table851) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1155 2024-11-11T04:23:29,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1397 2024-11-11T04:23:29,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table610 2024-11-11T04:23:29,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table610) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table852 2024-11-11T04:23:29,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table852) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1156 2024-11-11T04:23:29,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1398 2024-11-11T04:23:29,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table611 2024-11-11T04:23:29,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table611) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table853 2024-11-11T04:23:29,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table853) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table612 2024-11-11T04:23:29,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table612) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table854 2024-11-11T04:23:29,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table854) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table613 2024-11-11T04:23:29,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table613) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table855 2024-11-11T04:23:29,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table855) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table614 2024-11-11T04:23:29,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table614) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table856 2024-11-11T04:23:29,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table856) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table615 2024-11-11T04:23:29,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table615) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table857 2024-11-11T04:23:29,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table857) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table616 2024-11-11T04:23:29,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table616) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table858 2024-11-11T04:23:29,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table858) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table617 2024-11-11T04:23:29,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table617) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table859 2024-11-11T04:23:29,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table859) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table618 2024-11-11T04:23:29,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table618) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1390 2024-11-11T04:23:29,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table619 2024-11-11T04:23:29,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table619) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1157 2024-11-11T04:23:29,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1399 2024-11-11T04:23:29,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1158 2024-11-11T04:23:29,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1159 2024-11-11T04:23:29,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table240 2024-11-11T04:23:29,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table482 2024-11-11T04:23:29,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table241 2024-11-11T04:23:29,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table483 2024-11-11T04:23:29,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table242 2024-11-11T04:23:29,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table484 2024-11-11T04:23:29,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table243 2024-11-11T04:23:29,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table485 2024-11-11T04:23:29,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table244 2024-11-11T04:23:29,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table486 2024-11-11T04:23:29,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table245 2024-11-11T04:23:29,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table487 2024-11-11T04:23:29,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table246 2024-11-11T04:23:29,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table488 2024-11-11T04:23:29,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table247 2024-11-11T04:23:29,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table489 2024-11-11T04:23:29,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table248 2024-11-11T04:23:29,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table249 2024-11-11T04:23:29,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1308 2024-11-11T04:23:29,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1309 2024-11-11T04:23:29,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1300 2024-11-11T04:23:29,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1301 2024-11-11T04:23:29,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1302 2024-11-11T04:23:29,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1303 2024-11-11T04:23:29,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1304 2024-11-11T04:23:29,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table490 2024-11-11T04:23:29,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1305 2024-11-11T04:23:29,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table491 2024-11-11T04:23:29,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1306 2024-11-11T04:23:29,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table250 2024-11-11T04:23:29,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table492 2024-11-11T04:23:29,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1307 2024-11-11T04:23:29,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table471 2024-11-11T04:23:29,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table230 2024-11-11T04:23:29,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table472 2024-11-11T04:23:29,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table231 2024-11-11T04:23:29,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table473 2024-11-11T04:23:29,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table232 2024-11-11T04:23:29,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table474 2024-11-11T04:23:29,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table233 2024-11-11T04:23:29,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table475 2024-11-11T04:23:29,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table234 2024-11-11T04:23:29,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table476 2024-11-11T04:23:29,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table235 2024-11-11T04:23:29,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table477 2024-11-11T04:23:29,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table236 2024-11-11T04:23:29,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table478 2024-11-11T04:23:29,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table237 2024-11-11T04:23:29,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table479 2024-11-11T04:23:29,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table238 2024-11-11T04:23:29,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table239 2024-11-11T04:23:29,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table480 2024-11-11T04:23:29,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table481 2024-11-11T04:23:29,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table262 2024-11-11T04:23:29,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table263 2024-11-11T04:23:29,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table264 2024-11-11T04:23:29,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table265 2024-11-11T04:23:29,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table266 2024-11-11T04:23:29,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table267 2024-11-11T04:23:29,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table268 2024-11-11T04:23:29,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table269 2024-11-11T04:23:29,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table270 2024-11-11T04:23:29,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table271 2024-11-11T04:23:29,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table272 2024-11-11T04:23:29,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table251 2024-11-11T04:23:29,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table493 2024-11-11T04:23:29,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table252 2024-11-11T04:23:29,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table494 2024-11-11T04:23:29,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table253 2024-11-11T04:23:29,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table495 2024-11-11T04:23:29,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table254 2024-11-11T04:23:29,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table496 2024-11-11T04:23:29,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table255 2024-11-11T04:23:29,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table497 2024-11-11T04:23:29,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table256 2024-11-11T04:23:29,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table498 2024-11-11T04:23:29,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table257 2024-11-11T04:23:29,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table499 2024-11-11T04:23:29,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table258 2024-11-11T04:23:29,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table259 2024-11-11T04:23:29,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table260 2024-11-11T04:23:29,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table261 2024-11-11T04:23:29,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table680 2024-11-11T04:23:29,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table680) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table681 2024-11-11T04:23:29,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table681) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table440 2024-11-11T04:23:29,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table682 2024-11-11T04:23:29,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table682) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table441 2024-11-11T04:23:29,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table683 2024-11-11T04:23:29,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table683) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table200 2024-11-11T04:23:29,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table442 2024-11-11T04:23:29,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table684 2024-11-11T04:23:29,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table684) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1340 2024-11-11T04:23:29,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table201 2024-11-11T04:23:29,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table443 2024-11-11T04:23:29,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table685 2024-11-11T04:23:29,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table685) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1341 2024-11-11T04:23:29,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table202 2024-11-11T04:23:29,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table444 2024-11-11T04:23:29,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table686 2024-11-11T04:23:29,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table686) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1100 2024-11-11T04:23:29,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1342 2024-11-11T04:23:29,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table203 2024-11-11T04:23:29,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table445 2024-11-11T04:23:29,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table687 2024-11-11T04:23:29,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table687) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1101 2024-11-11T04:23:29,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1343 2024-11-11T04:23:29,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table204 2024-11-11T04:23:29,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table446 2024-11-11T04:23:29,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table688 2024-11-11T04:23:29,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table688) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table205 2024-11-11T04:23:29,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table447 2024-11-11T04:23:29,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table689 2024-11-11T04:23:29,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table689) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table206 2024-11-11T04:23:29,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table448 2024-11-11T04:23:29,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table207 2024-11-11T04:23:29,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table449 2024-11-11T04:23:29,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table208 2024-11-11T04:23:29,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table209 2024-11-11T04:23:29,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1102 2024-11-11T04:23:29,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1344 2024-11-11T04:23:29,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1103 2024-11-11T04:23:29,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1345 2024-11-11T04:23:29,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1104 2024-11-11T04:23:29,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1346 2024-11-11T04:23:29,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1105 2024-11-11T04:23:29,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1347 2024-11-11T04:23:29,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1106 2024-11-11T04:23:29,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1348 2024-11-11T04:23:29,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1107 2024-11-11T04:23:29,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1349 2024-11-11T04:23:29,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1108 2024-11-11T04:23:29,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table690 2024-11-11T04:23:29,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table690) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1109 2024-11-11T04:23:29,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table670 2024-11-11T04:23:29,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table670) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table671 2024-11-11T04:23:29,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table671) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table430 2024-11-11T04:23:29,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table672 2024-11-11T04:23:29,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table672) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table431 2024-11-11T04:23:29,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table673 2024-11-11T04:23:29,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table673) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table432 2024-11-11T04:23:29,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table674 2024-11-11T04:23:29,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table674) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1330 2024-11-11T04:23:29,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table433 2024-11-11T04:23:29,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table675 2024-11-11T04:23:29,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table675) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1331 2024-11-11T04:23:29,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table434 2024-11-11T04:23:29,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table676 2024-11-11T04:23:29,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table676) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1332 2024-11-11T04:23:29,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table435 2024-11-11T04:23:29,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table677 2024-11-11T04:23:29,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table677) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table436 2024-11-11T04:23:29,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table678 2024-11-11T04:23:29,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table678) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table437 2024-11-11T04:23:29,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table679 2024-11-11T04:23:29,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table679) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table438 2024-11-11T04:23:29,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table439 2024-11-11T04:23:29,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1333 2024-11-11T04:23:29,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1334 2024-11-11T04:23:29,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1335 2024-11-11T04:23:29,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1336 2024-11-11T04:23:29,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1337 2024-11-11T04:23:29,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1338 2024-11-11T04:23:29,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1339 2024-11-11T04:23:29,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table460 2024-11-11T04:23:29,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table461 2024-11-11T04:23:29,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table220 2024-11-11T04:23:29,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table462 2024-11-11T04:23:29,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table221 2024-11-11T04:23:29,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table463 2024-11-11T04:23:29,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table222 2024-11-11T04:23:29,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table464 2024-11-11T04:23:29,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table223 2024-11-11T04:23:29,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table465 2024-11-11T04:23:29,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table224 2024-11-11T04:23:29,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table466 2024-11-11T04:23:29,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1320 2024-11-11T04:23:29,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table225 2024-11-11T04:23:29,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table467 2024-11-11T04:23:29,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1321 2024-11-11T04:23:29,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table226 2024-11-11T04:23:29,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table468 2024-11-11T04:23:29,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table227 2024-11-11T04:23:29,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table469 2024-11-11T04:23:29,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table228 2024-11-11T04:23:29,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table229 2024-11-11T04:23:29,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1322 2024-11-11T04:23:29,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1323 2024-11-11T04:23:29,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1324 2024-11-11T04:23:29,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1325 2024-11-11T04:23:29,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1326 2024-11-11T04:23:29,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1327 2024-11-11T04:23:29,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1328 2024-11-11T04:23:29,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table470 2024-11-11T04:23:29,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1329 2024-11-11T04:23:29,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table691 2024-11-11T04:23:29,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table691) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table450 2024-11-11T04:23:29,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table692 2024-11-11T04:23:29,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table692) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table451 2024-11-11T04:23:29,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table693 2024-11-11T04:23:29,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table693) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table210 2024-11-11T04:23:29,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table452 2024-11-11T04:23:29,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table694 2024-11-11T04:23:29,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table694) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table211 2024-11-11T04:23:29,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table453 2024-11-11T04:23:29,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table695 2024-11-11T04:23:29,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table695) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table212 2024-11-11T04:23:29,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table454 2024-11-11T04:23:29,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table696 2024-11-11T04:23:29,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table696) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table213 2024-11-11T04:23:29,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table455 2024-11-11T04:23:29,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table697 2024-11-11T04:23:29,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table697) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table214 2024-11-11T04:23:29,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table456 2024-11-11T04:23:29,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table698 2024-11-11T04:23:29,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table698) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1310 2024-11-11T04:23:29,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table215 2024-11-11T04:23:29,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table457 2024-11-11T04:23:29,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table699 2024-11-11T04:23:29,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table699) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table216 2024-11-11T04:23:29,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table458 2024-11-11T04:23:29,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table217 2024-11-11T04:23:29,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table459 2024-11-11T04:23:29,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table218 2024-11-11T04:23:29,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table219 2024-11-11T04:23:29,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1319 2024-11-11T04:23:29,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1311 2024-11-11T04:23:29,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1312 2024-11-11T04:23:29,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1313 2024-11-11T04:23:29,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1314 2024-11-11T04:23:29,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1315 2024-11-11T04:23:29,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1316 2024-11-11T04:23:29,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1317 2024-11-11T04:23:29,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1318 2024-11-11T04:23:29,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table196 2024-11-11T04:23:29,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table197 2024-11-11T04:23:29,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table198 2024-11-11T04:23:29,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table199 2024-11-11T04:23:29,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table163 2024-11-11T04:23:29,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table164 2024-11-11T04:23:29,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table165 2024-11-11T04:23:29,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table166 2024-11-11T04:23:29,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table167 2024-11-11T04:23:29,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table168 2024-11-11T04:23:29,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table169 2024-11-11T04:23:29,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table170 2024-11-11T04:23:29,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table171 2024-11-11T04:23:29,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table172 2024-11-11T04:23:29,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table173 2024-11-11T04:23:29,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-11T04:23:29,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table394 2024-11-11T04:23:29,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-11T04:23:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table395 2024-11-11T04:23:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-11T04:23:29,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table396 2024-11-11T04:23:29,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table155 2024-11-11T04:23:29,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table397 2024-11-11T04:23:29,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table156 2024-11-11T04:23:29,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table398 2024-11-11T04:23:29,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table157 2024-11-11T04:23:29,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table399 2024-11-11T04:23:29,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table158 2024-11-11T04:23:29,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table159 2024-11-11T04:23:29,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table160 2024-11-11T04:23:29,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table161 2024-11-11T04:23:29,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table162 2024-11-11T04:23:29,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table185 2024-11-11T04:23:29,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table186 2024-11-11T04:23:29,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table187 2024-11-11T04:23:29,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table188 2024-11-11T04:23:29,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table189 2024-11-11T04:23:29,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table190 2024-11-11T04:23:29,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table191 2024-11-11T04:23:29,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table192 2024-11-11T04:23:29,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table193 2024-11-11T04:23:29,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table194 2024-11-11T04:23:29,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table195 2024-11-11T04:23:29,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table174 2024-11-11T04:23:29,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table175 2024-11-11T04:23:29,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table176 2024-11-11T04:23:29,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table177 2024-11-11T04:23:29,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table178 2024-11-11T04:23:29,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table179 2024-11-11T04:23:29,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table180 2024-11-11T04:23:29,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table181 2024-11-11T04:23:29,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table182 2024-11-11T04:23:29,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table183 2024-11-11T04:23:29,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table184 2024-11-11T04:23:29,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table800 2024-11-11T04:23:29,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table800) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table801 2024-11-11T04:23:29,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table801) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table802 2024-11-11T04:23:29,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table802) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table803 2024-11-11T04:23:29,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table803) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table804 2024-11-11T04:23:29,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table804) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table805 2024-11-11T04:23:29,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table805) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table806 2024-11-11T04:23:29,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table806) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table807 2024-11-11T04:23:29,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table807) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table808 2024-11-11T04:23:29,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table808) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table809 2024-11-11T04:23:29,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table809) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table820 2024-11-11T04:23:29,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table820) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table821 2024-11-11T04:23:29,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table821) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table822 2024-11-11T04:23:29,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table822) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table823 2024-11-11T04:23:29,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table823) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table824 2024-11-11T04:23:29,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table824) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table825 2024-11-11T04:23:29,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table825) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table826 2024-11-11T04:23:29,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table826) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table827 2024-11-11T04:23:29,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table827) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table828 2024-11-11T04:23:29,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table828) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table829 2024-11-11T04:23:29,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table829) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1193 2024-11-11T04:23:29,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1194 2024-11-11T04:23:29,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1195 2024-11-11T04:23:29,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1196 2024-11-11T04:23:29,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1197 2024-11-11T04:23:29,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1198 2024-11-11T04:23:29,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1199 2024-11-11T04:23:29,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table810 2024-11-11T04:23:29,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table810) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table811 2024-11-11T04:23:29,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table811) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table812 2024-11-11T04:23:29,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table812) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table813 2024-11-11T04:23:29,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table813) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table814 2024-11-11T04:23:29,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table814) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1190 2024-11-11T04:23:29,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table815 2024-11-11T04:23:29,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table815) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1191 2024-11-11T04:23:29,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table816 2024-11-11T04:23:29,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table816) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1192 2024-11-11T04:23:29,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table817 2024-11-11T04:23:29,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table817) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table818 2024-11-11T04:23:29,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table818) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table819 2024-11-11T04:23:29,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table819) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1260 2024-11-11T04:23:29,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1261 2024-11-11T04:23:29,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table760 2024-11-11T04:23:29,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table760) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1020 2024-11-11T04:23:29,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1020) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1262 2024-11-11T04:23:29,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table761 2024-11-11T04:23:29,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table761) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1021 2024-11-11T04:23:29,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1021) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1263 2024-11-11T04:23:29,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table520 2024-11-11T04:23:29,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table520) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table762 2024-11-11T04:23:29,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table762) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1022 2024-11-11T04:23:29,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1022) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1264 2024-11-11T04:23:29,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table521 2024-11-11T04:23:29,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table521) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table763 2024-11-11T04:23:29,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table763) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1023 2024-11-11T04:23:29,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1023) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1265 2024-11-11T04:23:29,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table522 2024-11-11T04:23:29,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table522) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table764 2024-11-11T04:23:29,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table764) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1024 2024-11-11T04:23:29,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1024) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1266 2024-11-11T04:23:29,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table523 2024-11-11T04:23:29,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table523) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table765 2024-11-11T04:23:29,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table765) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table524 2024-11-11T04:23:29,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table524) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table766 2024-11-11T04:23:29,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table766) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table525 2024-11-11T04:23:29,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table525) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table767 2024-11-11T04:23:29,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table767) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table526 2024-11-11T04:23:29,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table526) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table768 2024-11-11T04:23:29,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table768) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table527 2024-11-11T04:23:29,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table527) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table769 2024-11-11T04:23:29,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table769) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table528 2024-11-11T04:23:29,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table528) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table529 2024-11-11T04:23:29,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table529) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-11T04:23:29,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-11T04:23:29,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-11T04:23:29,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-11T04:23:29,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1025 2024-11-11T04:23:29,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1025) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1267 2024-11-11T04:23:29,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-11T04:23:29,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1026 2024-11-11T04:23:29,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1026) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1268 2024-11-11T04:23:29,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-11T04:23:29,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1027 2024-11-11T04:23:29,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1027) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1269 2024-11-11T04:23:29,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-11T04:23:29,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1028 2024-11-11T04:23:29,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1028) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-11T04:23:29,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1029 2024-11-11T04:23:29,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1029) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-11T04:23:29,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-11T04:23:29,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table990 2024-11-11T04:23:29,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table990) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1250 2024-11-11T04:23:29,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table991 2024-11-11T04:23:29,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table991) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1251 2024-11-11T04:23:29,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table750 2024-11-11T04:23:29,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table750) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table992 2024-11-11T04:23:29,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table992) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1010 2024-11-11T04:23:29,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1010) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1252 2024-11-11T04:23:29,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table751 2024-11-11T04:23:29,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table751) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table993 2024-11-11T04:23:29,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table993) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1011 2024-11-11T04:23:29,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1011) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1253 2024-11-11T04:23:29,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table510 2024-11-11T04:23:29,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table510) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table752 2024-11-11T04:23:29,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table752) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table994 2024-11-11T04:23:29,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table994) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1012 2024-11-11T04:23:29,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1012) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1254 2024-11-11T04:23:29,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table511 2024-11-11T04:23:29,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table511) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table753 2024-11-11T04:23:29,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table753) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table995 2024-11-11T04:23:29,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table995) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1013 2024-11-11T04:23:29,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1013) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1255 2024-11-11T04:23:29,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table512 2024-11-11T04:23:29,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table512) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table754 2024-11-11T04:23:29,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table754) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table996 2024-11-11T04:23:29,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table996) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table513 2024-11-11T04:23:29,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table513) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table755 2024-11-11T04:23:29,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table755) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table997 2024-11-11T04:23:29,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table997) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table514 2024-11-11T04:23:29,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table514) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table756 2024-11-11T04:23:29,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table756) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table998 2024-11-11T04:23:29,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table998) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table515 2024-11-11T04:23:29,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table515) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table757 2024-11-11T04:23:29,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table757) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table999 2024-11-11T04:23:29,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table999) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table516 2024-11-11T04:23:29,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table516) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table758 2024-11-11T04:23:29,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table758) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table517 2024-11-11T04:23:29,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table517) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table759 2024-11-11T04:23:29,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table759) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table518 2024-11-11T04:23:29,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table518) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table519 2024-11-11T04:23:29,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table519) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-11T04:23:29,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-11T04:23:29,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-11T04:23:29,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1014 2024-11-11T04:23:29,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1014) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1256 2024-11-11T04:23:29,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-11T04:23:29,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1015 2024-11-11T04:23:29,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1015) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1257 2024-11-11T04:23:29,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-11T04:23:29,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1016 2024-11-11T04:23:29,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1016) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1258 2024-11-11T04:23:29,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-11T04:23:29,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1017 2024-11-11T04:23:29,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1017) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1259 2024-11-11T04:23:29,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-11T04:23:29,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1018 2024-11-11T04:23:29,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1018) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-11T04:23:29,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1019 2024-11-11T04:23:29,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1019) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-11T04:23:29,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-11T04:23:29,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-11T04:23:29,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table780 2024-11-11T04:23:29,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table780) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table781 2024-11-11T04:23:29,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table781) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table540 2024-11-11T04:23:29,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table540) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table782 2024-11-11T04:23:29,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table782) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1240 2024-11-11T04:23:29,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table541 2024-11-11T04:23:29,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table541) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table783 2024-11-11T04:23:29,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table783) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1241 2024-11-11T04:23:29,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table300 2024-11-11T04:23:29,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table542 2024-11-11T04:23:29,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table542) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table784 2024-11-11T04:23:29,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table784) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1000 2024-11-11T04:23:29,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1000) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1242 2024-11-11T04:23:29,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table301 2024-11-11T04:23:29,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table543 2024-11-11T04:23:29,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table543) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table785 2024-11-11T04:23:29,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table785) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1001 2024-11-11T04:23:29,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1001) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1243 2024-11-11T04:23:29,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table302 2024-11-11T04:23:29,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table544 2024-11-11T04:23:29,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table544) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table786 2024-11-11T04:23:29,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table786) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1002 2024-11-11T04:23:29,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1002) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1244 2024-11-11T04:23:29,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table303 2024-11-11T04:23:29,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table545 2024-11-11T04:23:29,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table545) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table787 2024-11-11T04:23:29,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table787) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table304 2024-11-11T04:23:29,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table546 2024-11-11T04:23:29,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table546) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table788 2024-11-11T04:23:29,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table788) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table305 2024-11-11T04:23:29,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table547 2024-11-11T04:23:29,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table547) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table789 2024-11-11T04:23:29,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table789) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table306 2024-11-11T04:23:29,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table548 2024-11-11T04:23:29,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table548) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table307 2024-11-11T04:23:29,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table549 2024-11-11T04:23:29,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table549) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table308 2024-11-11T04:23:29,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table309 2024-11-11T04:23:29,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-11T04:23:29,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-11T04:23:29,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1003 2024-11-11T04:23:29,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1003) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1245 2024-11-11T04:23:29,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-11T04:23:29,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1004 2024-11-11T04:23:29,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1004) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1246 2024-11-11T04:23:29,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-11T04:23:29,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1005 2024-11-11T04:23:29,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1005) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1247 2024-11-11T04:23:29,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-11T04:23:29,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1006 2024-11-11T04:23:29,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1006) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1248 2024-11-11T04:23:29,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-11T04:23:29,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1007 2024-11-11T04:23:29,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1007) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1249 2024-11-11T04:23:29,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-11T04:23:29,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1008 2024-11-11T04:23:29,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1008) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-11T04:23:29,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1009 2024-11-11T04:23:29,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1009) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-11T04:23:29,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-11T04:23:29,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-11T04:23:29,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table770 2024-11-11T04:23:29,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table770) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table771 2024-11-11T04:23:29,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table771) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table530 2024-11-11T04:23:29,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table530) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table772 2024-11-11T04:23:29,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table772) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1230 2024-11-11T04:23:29,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table531 2024-11-11T04:23:29,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table531) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table773 2024-11-11T04:23:29,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table773) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1231 2024-11-11T04:23:29,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table532 2024-11-11T04:23:29,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table532) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table774 2024-11-11T04:23:29,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table774) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1232 2024-11-11T04:23:29,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table533 2024-11-11T04:23:29,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table533) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table775 2024-11-11T04:23:29,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table775) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1233 2024-11-11T04:23:29,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table534 2024-11-11T04:23:29,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table534) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table776 2024-11-11T04:23:29,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table776) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table535 2024-11-11T04:23:29,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table535) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table777 2024-11-11T04:23:29,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table777) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table536 2024-11-11T04:23:29,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table536) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table778 2024-11-11T04:23:29,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table778) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table537 2024-11-11T04:23:29,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table537) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table779 2024-11-11T04:23:29,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table779) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table538 2024-11-11T04:23:29,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table538) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table539 2024-11-11T04:23:29,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table539) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-11T04:23:29,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1234 2024-11-11T04:23:29,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-11T04:23:29,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1235 2024-11-11T04:23:29,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-11T04:23:29,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1236 2024-11-11T04:23:29,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-11T04:23:29,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1237 2024-11-11T04:23:29,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-11T04:23:29,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1238 2024-11-11T04:23:29,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-11T04:23:29,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1239 2024-11-11T04:23:29,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-11T04:23:29,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-11T04:23:29,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1061 2024-11-11T04:23:29,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1061) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1062 2024-11-11T04:23:29,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1062) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1063 2024-11-11T04:23:29,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1063) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1064 2024-11-11T04:23:29,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1064) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1065 2024-11-11T04:23:29,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1065) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table960 2024-11-11T04:23:29,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table960) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1066 2024-11-11T04:23:29,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1066) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table961 2024-11-11T04:23:29,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table961) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1067 2024-11-11T04:23:29,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1067) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table720 2024-11-11T04:23:29,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table720) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table962 2024-11-11T04:23:29,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table962) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1068 2024-11-11T04:23:29,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1068) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table721 2024-11-11T04:23:29,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table721) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table963 2024-11-11T04:23:29,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table963) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table722 2024-11-11T04:23:29,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table722) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table964 2024-11-11T04:23:29,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table964) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table723 2024-11-11T04:23:29,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table723) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table965 2024-11-11T04:23:29,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table965) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table724 2024-11-11T04:23:29,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table724) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table966 2024-11-11T04:23:29,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table966) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table725 2024-11-11T04:23:29,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table725) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table967 2024-11-11T04:23:29,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table967) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table726 2024-11-11T04:23:29,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table726) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table968 2024-11-11T04:23:29,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table968) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table727 2024-11-11T04:23:29,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table727) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table969 2024-11-11T04:23:29,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table969) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table728 2024-11-11T04:23:29,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table728) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1060 2024-11-11T04:23:29,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1060) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table729 2024-11-11T04:23:29,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table729) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1069 2024-11-11T04:23:29,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1069) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1050 2024-11-11T04:23:29,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1050) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1292 2024-11-11T04:23:29,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1051 2024-11-11T04:23:29,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1051) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1293 2024-11-11T04:23:29,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1052 2024-11-11T04:23:29,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1052) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1294 2024-11-11T04:23:29,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1053 2024-11-11T04:23:29,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1053) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1295 2024-11-11T04:23:29,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1054 2024-11-11T04:23:29,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1054) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1296 2024-11-11T04:23:29,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1055 2024-11-11T04:23:29,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1055) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1297 2024-11-11T04:23:29,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table950 2024-11-11T04:23:29,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table950) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1056 2024-11-11T04:23:29,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1056) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1298 2024-11-11T04:23:29,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table951 2024-11-11T04:23:29,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table951) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1057 2024-11-11T04:23:29,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1057) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1299 2024-11-11T04:23:29,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table710 2024-11-11T04:23:29,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table710) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table952 2024-11-11T04:23:29,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table952) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table711 2024-11-11T04:23:29,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table711) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table953 2024-11-11T04:23:29,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table953) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table712 2024-11-11T04:23:29,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table712) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table954 2024-11-11T04:23:29,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table954) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table713 2024-11-11T04:23:29,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table713) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table955 2024-11-11T04:23:29,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table955) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table714 2024-11-11T04:23:29,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table714) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table956 2024-11-11T04:23:29,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table956) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table715 2024-11-11T04:23:29,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table715) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table957 2024-11-11T04:23:29,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table957) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table716 2024-11-11T04:23:29,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table716) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table958 2024-11-11T04:23:29,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table958) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1290 2024-11-11T04:23:29,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table717 2024-11-11T04:23:29,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table717) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table959 2024-11-11T04:23:29,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table959) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1291 2024-11-11T04:23:29,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table718 2024-11-11T04:23:29,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table718) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table719 2024-11-11T04:23:29,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table719) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1058 2024-11-11T04:23:29,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1058) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1059 2024-11-11T04:23:29,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1059) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1281 2024-11-11T04:23:29,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1040 2024-11-11T04:23:29,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1040) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1282 2024-11-11T04:23:29,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1041 2024-11-11T04:23:29,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1041) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1283 2024-11-11T04:23:29,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table980 2024-11-11T04:23:29,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table980) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1042 2024-11-11T04:23:29,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1042) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1284 2024-11-11T04:23:29,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table981 2024-11-11T04:23:29,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table981) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1043 2024-11-11T04:23:29,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1043) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1285 2024-11-11T04:23:29,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table740 2024-11-11T04:23:29,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table740) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table982 2024-11-11T04:23:29,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table982) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1044 2024-11-11T04:23:29,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1044) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1286 2024-11-11T04:23:29,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table741 2024-11-11T04:23:29,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table741) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table983 2024-11-11T04:23:29,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table983) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1045 2024-11-11T04:23:29,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1045) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1287 2024-11-11T04:23:29,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table500 2024-11-11T04:23:29,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table500) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table742 2024-11-11T04:23:29,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table742) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table984 2024-11-11T04:23:29,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table984) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1046 2024-11-11T04:23:29,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1046) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1288 2024-11-11T04:23:29,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table501 2024-11-11T04:23:29,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table501) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table743 2024-11-11T04:23:29,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table743) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table985 2024-11-11T04:23:29,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table985) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table502 2024-11-11T04:23:29,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table502) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table744 2024-11-11T04:23:29,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table744) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table986 2024-11-11T04:23:29,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table986) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table503 2024-11-11T04:23:29,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table503) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table745 2024-11-11T04:23:29,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table745) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table987 2024-11-11T04:23:29,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table987) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table504 2024-11-11T04:23:29,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table504) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table746 2024-11-11T04:23:29,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table746) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table988 2024-11-11T04:23:29,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table988) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table505 2024-11-11T04:23:29,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table505) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table747 2024-11-11T04:23:29,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table747) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table989 2024-11-11T04:23:29,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table989) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table506 2024-11-11T04:23:29,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table506) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table748 2024-11-11T04:23:29,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table748) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table507 2024-11-11T04:23:29,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table507) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table749 2024-11-11T04:23:29,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table749) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table508 2024-11-11T04:23:29,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table508) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1280 2024-11-11T04:23:29,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table509 2024-11-11T04:23:29,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table509) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1047 2024-11-11T04:23:29,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1047) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1289 2024-11-11T04:23:29,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1048 2024-11-11T04:23:29,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1048) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1049 2024-11-11T04:23:29,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1049) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1270 2024-11-11T04:23:29,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1271 2024-11-11T04:23:29,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1030 2024-11-11T04:23:29,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1030) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1272 2024-11-11T04:23:29,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1031 2024-11-11T04:23:29,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1031) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1273 2024-11-11T04:23:29,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table970 2024-11-11T04:23:29,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table970) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1032 2024-11-11T04:23:29,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1032) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1274 2024-11-11T04:23:29,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table971 2024-11-11T04:23:29,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table971) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1033 2024-11-11T04:23:29,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1033) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1275 2024-11-11T04:23:29,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table730 2024-11-11T04:23:29,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table730) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table972 2024-11-11T04:23:29,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table972) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1034 2024-11-11T04:23:29,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1034) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1276 2024-11-11T04:23:29,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table731 2024-11-11T04:23:29,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table731) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table973 2024-11-11T04:23:29,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table973) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1035 2024-11-11T04:23:29,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1035) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1277 2024-11-11T04:23:29,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table732 2024-11-11T04:23:29,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table732) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table974 2024-11-11T04:23:29,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table974) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table733 2024-11-11T04:23:29,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table733) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table975 2024-11-11T04:23:29,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table975) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table734 2024-11-11T04:23:29,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table734) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table976 2024-11-11T04:23:29,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table976) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table735 2024-11-11T04:23:29,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table735) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table977 2024-11-11T04:23:29,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table977) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table736 2024-11-11T04:23:29,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table736) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table978 2024-11-11T04:23:29,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table978) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table737 2024-11-11T04:23:29,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table737) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table979 2024-11-11T04:23:29,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table979) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table738 2024-11-11T04:23:29,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table738) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table739 2024-11-11T04:23:29,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table739) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1036 2024-11-11T04:23:29,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1036) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1278 2024-11-11T04:23:29,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1037 2024-11-11T04:23:29,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1037) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1279 2024-11-11T04:23:29,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1038 2024-11-11T04:23:29,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1038) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1039 2024-11-11T04:23:29,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1039) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-11T04:23:29,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table361 2024-11-11T04:23:29,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-11T04:23:29,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-11T04:23:29,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table362 2024-11-11T04:23:29,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-11T04:23:29,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-11T04:23:29,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table363 2024-11-11T04:23:29,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-11T04:23:29,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-11T04:23:29,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table364 2024-11-11T04:23:29,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-11T04:23:29,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-11T04:23:29,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table365 2024-11-11T04:23:29,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-11T04:23:29,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-11T04:23:29,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table366 2024-11-11T04:23:29,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-11T04:23:29,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-11T04:23:29,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table367 2024-11-11T04:23:29,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-11T04:23:29,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table368 2024-11-11T04:23:29,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1420 2024-11-11T04:23:29,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-11T04:23:29,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table369 2024-11-11T04:23:29,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-11T04:23:29,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-11T04:23:29,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1429 2024-11-11T04:23:29,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1421 2024-11-11T04:23:29,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1422 2024-11-11T04:23:29,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1423 2024-11-11T04:23:29,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1424 2024-11-11T04:23:29,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1425 2024-11-11T04:23:29,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-11T04:23:29,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1426 2024-11-11T04:23:29,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-11T04:23:29,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table370 2024-11-11T04:23:29,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1427 2024-11-11T04:23:29,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-11T04:23:29,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table371 2024-11-11T04:23:29,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1428 2024-11-11T04:23:29,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table350 2024-11-11T04:23:29,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table592 2024-11-11T04:23:29,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table592) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table351 2024-11-11T04:23:29,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table593 2024-11-11T04:23:29,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table593) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-11T04:23:29,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table352 2024-11-11T04:23:29,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table594 2024-11-11T04:23:29,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table594) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-11T04:23:29,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table353 2024-11-11T04:23:29,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table595 2024-11-11T04:23:29,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table595) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-11T04:23:29,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table354 2024-11-11T04:23:29,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table596 2024-11-11T04:23:29,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table596) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-11T04:23:29,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table355 2024-11-11T04:23:29,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table597 2024-11-11T04:23:29,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table597) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-11T04:23:29,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table356 2024-11-11T04:23:29,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table598 2024-11-11T04:23:29,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table598) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-11T04:23:29,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table357 2024-11-11T04:23:29,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table599 2024-11-11T04:23:29,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table599) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-11T04:23:29,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table358 2024-11-11T04:23:29,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-11T04:23:29,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table359 2024-11-11T04:23:29,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-11T04:23:29,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-11T04:23:29,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1418 2024-11-11T04:23:29,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1419 2024-11-11T04:23:29,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1410 2024-11-11T04:23:29,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1411 2024-11-11T04:23:29,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1412 2024-11-11T04:23:29,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1413 2024-11-11T04:23:29,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1414 2024-11-11T04:23:29,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1415 2024-11-11T04:23:29,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1416 2024-11-11T04:23:29,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table360 2024-11-11T04:23:29,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1417 2024-11-11T04:23:29,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-11T04:23:29,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table383 2024-11-11T04:23:29,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-11T04:23:29,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table384 2024-11-11T04:23:29,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-11T04:23:29,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table385 2024-11-11T04:23:29,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-11T04:23:29,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table386 2024-11-11T04:23:29,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-11T04:23:29,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table387 2024-11-11T04:23:29,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-11T04:23:29,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table388 2024-11-11T04:23:29,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-11T04:23:29,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table389 2024-11-11T04:23:29,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-11T04:23:29,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-11T04:23:29,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:29,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1407 2024-11-11T04:23:29,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:29,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1408 2024-11-11T04:23:29,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:29,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1409 2024-11-11T04:23:29,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:29,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1400 2024-11-11T04:23:29,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1401 2024-11-11T04:23:29,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1402 2024-11-11T04:23:29,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:29,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table390 2024-11-11T04:23:29,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1403 2024-11-11T04:23:29,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:29,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table391 2024-11-11T04:23:29,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1404 2024-11-11T04:23:29,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:29,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-11T04:23:29,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table392 2024-11-11T04:23:29,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1405 2024-11-11T04:23:29,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:29,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-11T04:23:29,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table393 2024-11-11T04:23:29,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1406 2024-11-11T04:23:29,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-11T04:23:29,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table372 2024-11-11T04:23:29,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-11T04:23:29,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table373 2024-11-11T04:23:29,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-11T04:23:29,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table374 2024-11-11T04:23:29,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-11T04:23:29,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table375 2024-11-11T04:23:29,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-11T04:23:29,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table376 2024-11-11T04:23:29,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-11T04:23:29,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table377 2024-11-11T04:23:29,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-11T04:23:29,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table378 2024-11-11T04:23:29,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-11T04:23:29,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table379 2024-11-11T04:23:29,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-11T04:23:29,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-11T04:23:29,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table380 2024-11-11T04:23:29,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table381 2024-11-11T04:23:29,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-11T04:23:29,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table382 2024-11-11T04:23:29,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-11T04:23:29,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-11T04:23:29,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table560 2024-11-11T04:23:29,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table560) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-11T04:23:29,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table561 2024-11-11T04:23:29,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table561) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table320 2024-11-11T04:23:29,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table562 2024-11-11T04:23:29,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table562) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table321 2024-11-11T04:23:29,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table563 2024-11-11T04:23:29,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table563) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table322 2024-11-11T04:23:29,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table564 2024-11-11T04:23:29,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table564) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1220 2024-11-11T04:23:29,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table323 2024-11-11T04:23:29,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table565 2024-11-11T04:23:29,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table565) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1221 2024-11-11T04:23:29,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table324 2024-11-11T04:23:29,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table566 2024-11-11T04:23:29,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table566) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1222 2024-11-11T04:23:29,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table325 2024-11-11T04:23:29,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table567 2024-11-11T04:23:29,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table567) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table326 2024-11-11T04:23:29,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table568 2024-11-11T04:23:29,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table568) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table327 2024-11-11T04:23:29,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table569 2024-11-11T04:23:29,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table569) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table328 2024-11-11T04:23:29,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table329 2024-11-11T04:23:29,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1223 2024-11-11T04:23:29,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-11T04:23:29,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1224 2024-11-11T04:23:29,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-11T04:23:29,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1225 2024-11-11T04:23:29,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-11T04:23:29,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1226 2024-11-11T04:23:29,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-11T04:23:29,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1227 2024-11-11T04:23:29,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-11T04:23:29,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1228 2024-11-11T04:23:29,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-11T04:23:29,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1229 2024-11-11T04:23:29,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-11T04:23:29,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-11T04:23:29,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table790 2024-11-11T04:23:29,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table790) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-11T04:23:29,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table791 2024-11-11T04:23:29,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table791) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-11T04:23:29,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table550 2024-11-11T04:23:29,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table550) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table792 2024-11-11T04:23:29,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table792) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-11T04:23:29,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table551 2024-11-11T04:23:29,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table551) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table793 2024-11-11T04:23:29,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table793) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table310 2024-11-11T04:23:29,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table552 2024-11-11T04:23:29,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table552) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table794 2024-11-11T04:23:29,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table794) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table311 2024-11-11T04:23:29,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table553 2024-11-11T04:23:29,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table553) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table795 2024-11-11T04:23:29,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table795) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table312 2024-11-11T04:23:29,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table554 2024-11-11T04:23:29,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table554) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table796 2024-11-11T04:23:29,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table796) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1210 2024-11-11T04:23:29,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table313 2024-11-11T04:23:29,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table555 2024-11-11T04:23:29,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table555) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table797 2024-11-11T04:23:29,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table797) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1211 2024-11-11T04:23:29,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table314 2024-11-11T04:23:29,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table556 2024-11-11T04:23:29,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table556) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table798 2024-11-11T04:23:29,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table798) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table315 2024-11-11T04:23:29,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table557 2024-11-11T04:23:29,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table557) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table799 2024-11-11T04:23:29,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table799) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table316 2024-11-11T04:23:29,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table558 2024-11-11T04:23:29,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table558) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table317 2024-11-11T04:23:29,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table559 2024-11-11T04:23:29,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table559) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table318 2024-11-11T04:23:29,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table319 2024-11-11T04:23:29,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1212 2024-11-11T04:23:29,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1213 2024-11-11T04:23:29,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-11T04:23:29,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1214 2024-11-11T04:23:29,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-11T04:23:29,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1215 2024-11-11T04:23:29,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-11T04:23:29,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1216 2024-11-11T04:23:29,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-11T04:23:29,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1217 2024-11-11T04:23:29,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-11T04:23:29,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1218 2024-11-11T04:23:29,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-11T04:23:29,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1219 2024-11-11T04:23:29,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-11T04:23:29,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table581 2024-11-11T04:23:29,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table581) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-11T04:23:29,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table340 2024-11-11T04:23:29,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table582 2024-11-11T04:23:29,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table582) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-11T04:23:29,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table341 2024-11-11T04:23:29,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table583 2024-11-11T04:23:29,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table583) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-11T04:23:29,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-11T04:23:29,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table342 2024-11-11T04:23:29,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table584 2024-11-11T04:23:29,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table584) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-11T04:23:29,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-11T04:23:29,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table343 2024-11-11T04:23:29,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table585 2024-11-11T04:23:29,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table585) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-11T04:23:29,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table344 2024-11-11T04:23:29,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table586 2024-11-11T04:23:29,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table586) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-11T04:23:29,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table345 2024-11-11T04:23:29,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table587 2024-11-11T04:23:29,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table587) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-11T04:23:29,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table346 2024-11-11T04:23:29,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table588 2024-11-11T04:23:29,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table588) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1200 2024-11-11T04:23:29,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-11T04:23:29,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table347 2024-11-11T04:23:29,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table589 2024-11-11T04:23:29,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table589) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-11T04:23:29,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table348 2024-11-11T04:23:29,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-11T04:23:29,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table349 2024-11-11T04:23:29,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-11T04:23:29,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-11T04:23:29,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1209 2024-11-11T04:23:29,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1201 2024-11-11T04:23:29,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1202 2024-11-11T04:23:29,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1203 2024-11-11T04:23:29,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-11T04:23:29,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1204 2024-11-11T04:23:29,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-11T04:23:29,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1205 2024-11-11T04:23:29,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-11T04:23:29,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1206 2024-11-11T04:23:29,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-11T04:23:29,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table590 2024-11-11T04:23:29,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table590) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1207 2024-11-11T04:23:29,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-11T04:23:29,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table591 2024-11-11T04:23:29,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table591) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1208 2024-11-11T04:23:29,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-11T04:23:29,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table570 2024-11-11T04:23:29,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table570) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-11T04:23:29,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table571 2024-11-11T04:23:29,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table571) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-11T04:23:29,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table330 2024-11-11T04:23:29,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table572 2024-11-11T04:23:29,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table572) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-11T04:23:29,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table331 2024-11-11T04:23:29,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table573 2024-11-11T04:23:29,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table573) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-11T04:23:29,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table332 2024-11-11T04:23:29,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table574 2024-11-11T04:23:29,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table574) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-11T04:23:29,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table333 2024-11-11T04:23:29,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table575 2024-11-11T04:23:29,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table575) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table334 2024-11-11T04:23:29,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table576 2024-11-11T04:23:29,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table576) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1430 2024-11-11T04:23:29,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table335 2024-11-11T04:23:29,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table577 2024-11-11T04:23:29,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table577) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1431 2024-11-11T04:23:29,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table336 2024-11-11T04:23:29,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table578 2024-11-11T04:23:29,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table578) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table337 2024-11-11T04:23:29,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table579 2024-11-11T04:23:29,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table579) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table338 2024-11-11T04:23:29,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table339 2024-11-11T04:23:29,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-11T04:23:29,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-11T04:23:29,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,479 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,479 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,479 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-11T04:23:29,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,479 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,479 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,479 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-11T04:23:29,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,479 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,479 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,479 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table580 2024-11-11T04:23:29,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table580) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table284 2024-11-11T04:23:29,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table285 2024-11-11T04:23:29,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table286 2024-11-11T04:23:29,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table287 2024-11-11T04:23:29,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table288 2024-11-11T04:23:29,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table289 2024-11-11T04:23:29,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table290 2024-11-11T04:23:29,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table291 2024-11-11T04:23:29,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table292 2024-11-11T04:23:29,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table293 2024-11-11T04:23:29,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table294 2024-11-11T04:23:29,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table273 2024-11-11T04:23:29,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table274 2024-11-11T04:23:29,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table275 2024-11-11T04:23:29,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table276 2024-11-11T04:23:29,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table277 2024-11-11T04:23:29,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table278 2024-11-11T04:23:29,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table279 2024-11-11T04:23:29,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table280 2024-11-11T04:23:29,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table281 2024-11-11T04:23:29,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table282 2024-11-11T04:23:29,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table283 2024-11-11T04:23:29,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table295 2024-11-11T04:23:29,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table296 2024-11-11T04:23:29,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table297 2024-11-11T04:23:29,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table298 2024-11-11T04:23:29,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table299 2024-11-11T04:23:29,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table920 2024-11-11T04:23:29,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table920) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table921 2024-11-11T04:23:29,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table921) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table922 2024-11-11T04:23:29,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table922) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table923 2024-11-11T04:23:29,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table923) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table924 2024-11-11T04:23:29,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table924) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table925 2024-11-11T04:23:29,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table925) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table926 2024-11-11T04:23:29,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table926) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table927 2024-11-11T04:23:29,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table927) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table928 2024-11-11T04:23:29,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table928) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table929 2024-11-11T04:23:29,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table929) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1094 2024-11-11T04:23:29,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1094) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1095 2024-11-11T04:23:29,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1095) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1096 2024-11-11T04:23:29,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1096) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1097 2024-11-11T04:23:29,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1097) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1098 2024-11-11T04:23:29,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1098) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1099 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1099) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table910 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table910) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table911 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table911) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table912 2024-11-11T04:23:29,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table912) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1090 2024-11-11T04:23:29,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1090) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table913 2024-11-11T04:23:29,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table913) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1091 2024-11-11T04:23:29,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1091) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table914 2024-11-11T04:23:29,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table914) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1092 2024-11-11T04:23:29,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1092) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table915 2024-11-11T04:23:29,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table915) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1093 2024-11-11T04:23:29,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1093) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table916 2024-11-11T04:23:29,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table916) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table917 2024-11-11T04:23:29,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table917) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table918 2024-11-11T04:23:29,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table918) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table919 2024-11-11T04:23:29,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table919) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1083 2024-11-11T04:23:29,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1083) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1084 2024-11-11T04:23:29,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1084) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1085 2024-11-11T04:23:29,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1085) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1086 2024-11-11T04:23:29,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1086) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1087 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1087) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1088 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1088) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1089 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1089) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table940 2024-11-11T04:23:29,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table940) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table941 2024-11-11T04:23:29,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table941) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table700 2024-11-11T04:23:29,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table700) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table942 2024-11-11T04:23:29,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table942) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table701 2024-11-11T04:23:29,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table701) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table943 2024-11-11T04:23:29,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table943) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table702 2024-11-11T04:23:29,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table702) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table944 2024-11-11T04:23:29,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table944) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table703 2024-11-11T04:23:29,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table703) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table945 2024-11-11T04:23:29,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table945) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table704 2024-11-11T04:23:29,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table704) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table946 2024-11-11T04:23:29,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table946) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1080 2024-11-11T04:23:29,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1080) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table705 2024-11-11T04:23:29,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table705) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table947 2024-11-11T04:23:29,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table947) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1081 2024-11-11T04:23:29,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1081) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table706 2024-11-11T04:23:29,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table706) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table948 2024-11-11T04:23:29,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table948) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1082 2024-11-11T04:23:29,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1082) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table707 2024-11-11T04:23:29,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table707) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table949 2024-11-11T04:23:29,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table949) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table708 2024-11-11T04:23:29,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table708) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table709 2024-11-11T04:23:29,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table709) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1072 2024-11-11T04:23:29,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1072) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1073 2024-11-11T04:23:29,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1073) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1074 2024-11-11T04:23:29,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1074) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1075 2024-11-11T04:23:29,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1075) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1076 2024-11-11T04:23:29,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1076) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1077 2024-11-11T04:23:29,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1077) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1078 2024-11-11T04:23:29,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1078) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1079 2024-11-11T04:23:29,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1079) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table930 2024-11-11T04:23:29,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table930) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table931 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table931) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table932 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table932) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table933 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table933) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table934 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table934) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table935 2024-11-11T04:23:29,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table935) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table936 2024-11-11T04:23:29,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table936) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1070 2024-11-11T04:23:29,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1070) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table937 2024-11-11T04:23:29,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table937) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1071 2024-11-11T04:23:29,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1071) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table938 2024-11-11T04:23:29,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table938) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table939 2024-11-11T04:23:29,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table939) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table900 2024-11-11T04:23:29,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table900) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table901 2024-11-11T04:23:29,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table901) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table902 2024-11-11T04:23:29,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table902) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table903 2024-11-11T04:23:29,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table903) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table904 2024-11-11T04:23:29,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table904) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table905 2024-11-11T04:23:29,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table905) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table906 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table906) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table907 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table907) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table908 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table908) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table909 2024-11-11T04:23:29,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2127514173=0, srv55389968=1} racks are {rack=0} 2024-11-11T04:23:29,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table909) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-11T04:23:29,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-11T04:23:29,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-11T04:23:29,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-11T04:23:29,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-11T04:23:29,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-11T04:23:29,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-11T04:23:29,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-11T04:23:29,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-11T04:23:29,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-11T04:23:29,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-11T04:23:29,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-11T04:23:29,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-11T04:23:29,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-11T04:23:29,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-11T04:23:29,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-11T04:23:29,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-11T04:23:29,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-11T04:23:29,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-11T04:23:29,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-11T04:23:29,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-11T04:23:29,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-11T04:23:29,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-11T04:23:29,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-11T04:23:29,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-11T04:23:29,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-11T04:23:29,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-11T04:23:29,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-11T04:23:29,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:29,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:29,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:29,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:29,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-11T04:23:29,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:29,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-11T04:23:29,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:29,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-11T04:23:29,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:29,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-11T04:23:29,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:29,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-11T04:23:29,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-11T04:23:29,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-11T04:23:29,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-11T04:23:29,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-11T04:23:29,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-11T04:23:29,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-11T04:23:29,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-11T04:23:29,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-11T04:23:29,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-11T04:23:29,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-11T04:23:29,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1988570887=1, srv1594688378=0} racks are {rack=0} 2024-11-11T04:23:29,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:29,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
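Note on the repeated "weighted average imbalance" INFO messages above: each record lists the per-cost-function multipliers and imbalances (the functionCost summary) and then reports a single weighted average that is compared against the 1.0 threshold. The following is a minimal, hypothetical Java sketch (not HBase source code) that recomputes that number under the assumption that the weighted average is the multiplier-weighted mean of the listed per-function imbalances, with "(not needed)" functions skipped; with every imbalance at 0.0 it reproduces the 0.0 reported in the log.

```java
// Hypothetical illustration only: recompute the "weighted average imbalance"
// reported by StochasticLoadBalancer, assuming it is the multiplier-weighted
// mean of the per-function imbalance values in the functionCost summary.
public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        // (multiplier, imbalance) pairs taken from the functionCost line above;
        // functions reported as "(not needed)" are omitted.
        double[][] functions = {
            {500.0, 0.0}, // RegionCountSkewCostFunction
            {7.0,   0.0}, // MoveCostFunction
            {15.0,  0.0}, // RackLocalityCostFunction
            {35.0,  0.0}, // TableSkewCostFunction
            {5.0,   0.0}, // ReadRequestCostFunction
            {5.0,   0.0}, // WriteRequestCostFunction
            {5.0,   0.0}, // MemStoreSizeCostFunction
            {5.0,   0.0}  // StoreFileCostFunction
        };
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] f : functions) {
            weightedSum += f[0] * f[1];   // multiplier * imbalance
            multiplierSum += f[0];
        }
        double weightedAverageImbalance =
            multiplierSum > 0 ? weightedSum / multiplierSum : 0.0;
        double minCostNeedBalance = 1.0;  // threshold reported in the log
        System.out.printf(
            "weighted average imbalance=%.1f <= threshold(%.1f) -> skipping load balancing%n",
            weightedAverageImbalance, minCostNeedBalance);
    }
}
```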
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1083268949=0, srv1937500958=1, srv931809632=2} racks are {rack=0} 2024-11-11T04:23:29,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,536 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,536 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1083268949=0, srv1937500958=1, srv931809632=2} racks are {rack=0} 2024-11-11T04:23:29,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,536 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,536 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:29,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv294045951=1, srv1055451703=0, srv754066273=2} racks are {rack=0} 2024-11-11T04:23:29,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv294045951=1, srv1055451703=0, srv754066273=2} racks are {rack=0} 2024-11-11T04:23:29,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv294045951=1, srv1055451703=0, srv754066273=2} racks are {rack=0} 2024-11-11T04:23:29,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1164739768=1, srv1055331350=0, srv40853906=2} racks are {rack=0} 2024-11-11T04:23:29,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1164739768=1, srv1055331350=0, srv40853906=2} racks are {rack=0} 2024-11-11T04:23:29,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:29,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1204002444=0, srv911745094=2, srv599128105=1} racks are {rack=0} 2024-11-11T04:23:29,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1204002444=0, srv911745094=2, srv599128105=1} racks are {rack=0} 2024-11-11T04:23:29,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1204002444=0, srv911745094=2, srv599128105=1} racks are {rack=0} 2024-11-11T04:23:29,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:29,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv604186906=2, srv153521760=0, srv1710444482=1} racks are {rack=0} 2024-11-11T04:23:29,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:29,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv604186906=2, srv153521760=0, srv1710444482=1} racks are {rack=0} 2024-11-11T04:23:29,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv604186906=2, srv153521760=0, srv1710444482=1} racks are {rack=0} 2024-11-11T04:23:29,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv604186906=2, srv153521760=0, srv1710444482=1} racks are {rack=0} 2024-11-11T04:23:29,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-11T04:23:29,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-11T04:23:29,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-11T04:23:29,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-11T04:23:29,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-11T04:23:29,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-11T04:23:29,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-11T04:23:29,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:29,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:29,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:29,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:29,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:29,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:29,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-11T04:23:29,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:29,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-11T04:23:29,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:29,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-11T04:23:29,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1181133356=0, srv1182994421=1, srv1751839614=2} racks are {rack=0} 2024-11-11T04:23:29,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-11T04:23:29,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:29,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1737797293=0, srv1764961400=1, srv1954708167=2, srv473642904=3} racks are {rack=0} 2024-11-11T04:23:29,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1737797293=0, srv1764961400=1, srv1954708167=2, srv473642904=3} racks are {rack=0} 2024-11-11T04:23:29,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1737797293=0, srv1764961400=1, srv1954708167=2, srv473642904=3} racks are {rack=0} 2024-11-11T04:23:29,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:29,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2071310245=2, srv1103102126=1, srv342415569=3, srv1033875660=0} racks are {rack=0} 2024-11-11T04:23:29,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:29,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2071310245=2, srv1103102126=1, srv342415569=3, srv1033875660=0} racks are {rack=0} 2024-11-11T04:23:29,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2071310245=2, srv1103102126=1, srv342415569=3, srv1033875660=0} racks are {rack=0} 2024-11-11T04:23:29,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2071310245=2, srv1103102126=1, srv342415569=3, srv1033875660=0} racks are {rack=0} 2024-11-11T04:23:29,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:29,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1651138330=1, srv2122763367=2, srv224862432=3, srv137844870=0} racks are {rack=0} 2024-11-11T04:23:29,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:29,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1651138330=1, srv2122763367=2, srv224862432=3, srv137844870=0} racks are {rack=0} 2024-11-11T04:23:29,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:29,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1651138330=1, srv2122763367=2, srv224862432=3, srv137844870=0} racks are {rack=0} 2024-11-11T04:23:29,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1651138330=1, srv2122763367=2, srv224862432=3, srv137844870=0} racks are {rack=0} 2024-11-11T04:23:29,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1651138330=1, srv2122763367=2, srv224862432=3, srv137844870=0} racks are {rack=0} 2024-11-11T04:23:29,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:29,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1731976901=0, srv218882134=1, srv610375777=2, srv831577560=3} racks are {rack=0} 2024-11-11T04:23:29,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:29,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1731976901=0, srv218882134=1, srv610375777=2, srv831577560=3} racks are {rack=0} 2024-11-11T04:23:29,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:29,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1731976901=0, srv218882134=1, srv610375777=2, srv831577560=3} racks are {rack=0} 2024-11-11T04:23:29,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:29,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1731976901=0, srv218882134=1, srv610375777=2, srv831577560=3} racks are {rack=0} 2024-11-11T04:23:29,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1731976901=0, srv218882134=1, srv610375777=2, srv831577560=3} racks are {rack=0} 2024-11-11T04:23:29,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1731976901=0, srv218882134=1, srv610375777=2, srv831577560=3} racks are {rack=0} 2024-11-11T04:23:29,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:29,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv349159302=2, srv626122017=3, srv1555280845=1, srv1276875597=0} racks are {rack=0} 2024-11-11T04:23:29,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv349159302=2, srv626122017=3, srv1555280845=1, srv1276875597=0} racks are {rack=0} 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
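Note on the repeated "Slop is less than zero, not checking for sloppiness" message: it indicates the balancer's slop value was configured below zero, which disables that check entirely. The sketch below shows one plausible way this could be set, assuming the standard hbase.regions.slop property is what feeds the check; that link is inferred from the message, not confirmed from this test's code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class DisableSlopCheckSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumption for illustration: a negative hbase.regions.slop produces the
        // "Slop is less than zero, not checking for sloppiness" message seen above.
        conf.setFloat("hbase.regions.slop", -1.0f);
        System.out.println("hbase.regions.slop = " + conf.getFloat("hbase.regions.slop", 0.2f));
    }
}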
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv349159302=2, srv626122017=3, srv1555280845=1, srv1276875597=0} racks are {rack=0} 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv349159302=2, srv626122017=3, srv1555280845=1, srv1276875597=0} racks are {rack=0} 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv349159302=2, srv626122017=3, srv1555280845=1, srv1276875597=0} racks are {rack=0} 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv349159302=2, srv626122017=3, srv1555280845=1, srv1276875597=0} racks are {rack=0} 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:29,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv401634459=3, srv1945798198=1, srv349043594=2, srv1597696101=0} racks are {rack=0} 2024-11-11T04:23:29,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:29,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv401634459=3, srv1945798198=1, srv349043594=2, srv1597696101=0} racks are {rack=0} 2024-11-11T04:23:29,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:29,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv401634459=3, srv1945798198=1, srv349043594=2, srv1597696101=0} racks are {rack=0} 2024-11-11T04:23:29,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:29,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv401634459=3, srv1945798198=1, srv349043594=2, srv1597696101=0} racks are {rack=0} 2024-11-11T04:23:29,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv401634459=3, srv1945798198=1, srv349043594=2, srv1597696101=0} racks are {rack=0} 2024-11-11T04:23:29,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv401634459=3, srv1945798198=1, srv349043594=2, srv1597696101=0} racks are {rack=0} 2024-11-11T04:23:29,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:29,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2137140986=2, srv139610436=0, srv2104243653=1, srv772444083=3} racks are {rack=0} 2024-11-11T04:23:29,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:29,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2137140986=2, srv139610436=0, srv2104243653=1, srv772444083=3} racks are {rack=0} 2024-11-11T04:23:29,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:29,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2137140986=2, srv139610436=0, srv2104243653=1, srv772444083=3} racks are {rack=0} 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2137140986=2, srv139610436=0, srv2104243653=1, srv772444083=3} racks are {rack=0} 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2137140986=2, srv139610436=0, srv2104243653=1, srv772444083=3} racks are {rack=0} 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2137140986=2, srv139610436=0, srv2104243653=1, srv772444083=3} racks are {rack=0} 2024-11-11T04:23:29,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:29,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1542496232=0, srv85058917=3, srv386822793=2, srv35824585=1} racks are {rack=0} 2024-11-11T04:23:29,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:29,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1542496232=0, srv85058917=3, srv386822793=2, srv35824585=1} racks are {rack=0} 2024-11-11T04:23:29,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:29,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1542496232=0, srv85058917=3, srv386822793=2, srv35824585=1} racks are {rack=0} 2024-11-11T04:23:29,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:29,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1542496232=0, srv85058917=3, srv386822793=2, srv35824585=1} racks are {rack=0} 2024-11-11T04:23:29,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1542496232=0, srv85058917=3, srv386822793=2, srv35824585=1} racks are {rack=0} 2024-11-11T04:23:29,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1542496232=0, srv85058917=3, srv386822793=2, srv35824585=1} racks are {rack=0} 2024-11-11T04:23:29,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:29,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1542496232=0, srv85058917=3, srv386822793=2, srv35824585=1} racks are {rack=0} 2024-11-11T04:23:29,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:29,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1881132404=2, srv1788376622=1, srv761604727=3, srv163382151=0} racks are {rack=0} 2024-11-11T04:23:29,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:29,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1881132404=2, srv1788376622=1, srv761604727=3, srv163382151=0} racks are {rack=0} 2024-11-11T04:23:29,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:29,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1881132404=2, srv1788376622=1, srv761604727=3, srv163382151=0} racks are {rack=0} 2024-11-11T04:23:29,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:29,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1881132404=2, srv1788376622=1, srv761604727=3, srv163382151=0} racks are {rack=0} 2024-11-11T04:23:29,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1881132404=2, srv1788376622=1, srv761604727=3, srv163382151=0} racks are {rack=0} 2024-11-11T04:23:29,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1881132404=2, srv1788376622=1, srv761604727=3, srv163382151=0} racks are {rack=0} 2024-11-11T04:23:29,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:29,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1881132404=2, srv1788376622=1, srv761604727=3, srv163382151=0} racks are {rack=0} 2024-11-11T04:23:29,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:29,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1881132404=2, srv1788376622=1, srv761604727=3, srv163382151=0} racks are {rack=0} 2024-11-11T04:23:29,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:29,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2054902064=1, srv1940077038=0, srv2137415430=2, srv705841325=3} racks are {rack=0} 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2054902064=1, srv1940077038=0, srv2137415430=2, srv705841325=3} racks are {rack=0} 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2054902064=1, srv1940077038=0, srv2137415430=2, srv705841325=3} racks are {rack=0} 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2054902064=1, srv1940077038=0, srv2137415430=2, srv705841325=3} racks are {rack=0} 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2054902064=1, srv1940077038=0, srv2137415430=2, srv705841325=3} racks are {rack=0} 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2054902064=1, srv1940077038=0, srv2137415430=2, srv705841325=3} racks are {rack=0} 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:29,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2054902064=1, srv1940077038=0, srv2137415430=2, srv705841325=3} racks are {rack=0} 2024-11-11T04:23:29,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-11T04:23:29,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:29,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902227345=4, srv1753658870=3, srv1189675735=0, srv1477101067=1, srv1527615835=2} racks are {rack=0} 2024-11-11T04:23:29,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902227345=4, srv1753658870=3, srv1189675735=0, srv1477101067=1, srv1527615835=2} racks are {rack=0} 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902227345=4, srv1753658870=3, srv1189675735=0, srv1477101067=1, srv1527615835=2} racks are {rack=0} 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-11T04:23:29,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:29,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902227345=4, srv1753658870=3, srv1189675735=0, srv1477101067=1, srv1527615835=2} racks are {rack=0} 2024-11-11T04:23:29,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-11T04:23:29,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1380 2024-11-11T04:23:29,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1381 2024-11-11T04:23:29,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table880 2024-11-11T04:23:29,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table880) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1140 2024-11-11T04:23:29,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1382 2024-11-11T04:23:29,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table881 2024-11-11T04:23:29,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table881) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1141 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1383 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table640 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table640) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table882 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table882) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1142 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1384 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table641 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table641) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table883 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table883) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1143 2024-11-11T04:23:29,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1385 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table400 2024-11-11T04:23:29,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table642 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table642) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table884 2024-11-11T04:23:29,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table884) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1144 2024-11-11T04:23:29,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1386 2024-11-11T04:23:29,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table401 2024-11-11T04:23:29,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table643 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table643) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table885 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table885) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1145 2024-11-11T04:23:29,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1387 2024-11-11T04:23:29,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table402 2024-11-11T04:23:29,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table644 2024-11-11T04:23:29,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table644) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table886 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table886) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table403 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table645 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table645) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table887 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table887) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table404 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table646 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table646) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table888 2024-11-11T04:23:29,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table888) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table405 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table647 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table647) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table889 2024-11-11T04:23:29,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table889) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table406 2024-11-11T04:23:29,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table648 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table648) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table407 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table649 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table649) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table408 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table409 2024-11-11T04:23:29,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1146 2024-11-11T04:23:29,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1388 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1147 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1389 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1148 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1149 2024-11-11T04:23:29,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1370 2024-11-11T04:23:29,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1371 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table870 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table870) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1130 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1372 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table871 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table871) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1131 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1373 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table630 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table630) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table872 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table872) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1132 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1374 2024-11-11T04:23:29,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table631 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table631) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table873 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table873) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1133 2024-11-11T04:23:29,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1375 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table632 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table632) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table874 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table874) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1134 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1376 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table633 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table633) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table875 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table875) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table634 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table634) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table876 2024-11-11T04:23:29,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table876) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table635 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table635) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table877 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table877) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table636 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table636) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table878 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table878) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table637 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table637) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table879 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table879) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table638 2024-11-11T04:23:29,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table638) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table639 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table639) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1135 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1377 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1136 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1378 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1137 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1379 2024-11-11T04:23:29,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1138 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1139 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table660 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table660) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1360 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table661 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table661) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1361 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table420 2024-11-11T04:23:29,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table662 2024-11-11T04:23:29,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table662) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1120 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1362 2024-11-11T04:23:29,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table421 2024-11-11T04:23:29,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table663 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table663) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1121 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1363 2024-11-11T04:23:29,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table422 2024-11-11T04:23:29,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table664 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table664) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1122 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1364 2024-11-11T04:23:29,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table423 2024-11-11T04:23:29,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table665 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table665) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1123 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1365 2024-11-11T04:23:29,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table424 2024-11-11T04:23:29,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table666 2024-11-11T04:23:29,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table666) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table425 2024-11-11T04:23:29,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table667 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table667) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table426 2024-11-11T04:23:29,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table668 2024-11-11T04:23:29,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table668) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table427 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table669 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table669) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table428 2024-11-11T04:23:29,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table429 2024-11-11T04:23:29,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1124 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1366 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1125 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
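Each BalancerClusterState block above reports the same topology: six servers, one per host, all on rack 0 ("Number of tables=1, number of hosts=6, number of racks=1"). The sketch below is only an assumed, simplified reading of that host/rack indexing to make the mapping explicit; it is not HBase's actual BalancerClusterState code, and the class name ClusterTopologySketch is hypothetical. The server names are copied from the "Hosts are {...}" lines above.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterTopologySketch {
  public static void main(String[] args) {
    // Assumed mapping: each server name is assigned a host index, as in the
    // "Hosts are {srvXXXX=N, ...}" log lines above.
    Map<String, Integer> hostIndexByServer = new LinkedHashMap<>();
    hostIndexByServer.put("srv1189910397", 0);
    hostIndexByServer.put("srv1221393750", 1);
    hostIndexByServer.put("srv1745892569", 2);
    hostIndexByServer.put("srv1968334357", 3);
    hostIndexByServer.put("srv836767629", 4);
    hostIndexByServer.put("srv885747946", 5);

    // Single rack, so every host maps to rack index 0, matching
    // "server N is on host N" / "server N is on rack 0" in the log.
    hostIndexByServer.forEach((server, hostIdx) ->
        System.out.println("server " + hostIdx + " (" + server + ") is on host "
            + hostIdx + ", rack 0"));
  }
}
```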
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1367 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1126 2024-11-11T04:23:29,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1368 2024-11-11T04:23:29,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1127 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1369 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1128 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1129 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table890 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table890) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table891 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table891) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table650 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table650) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table892 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table892) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1350 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table651 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table651) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table893 2024-11-11T04:23:29,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table893) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1351 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table410 2024-11-11T04:23:29,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table652 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table652) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table894 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table894) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1110 2024-11-11T04:23:29,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1352 2024-11-11T04:23:29,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table411 2024-11-11T04:23:29,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table653 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table653) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table895 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table895) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1111 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1353 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table412 2024-11-11T04:23:29,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table654 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table654) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table896 2024-11-11T04:23:29,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table896) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1112 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1354 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table413 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table655 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table655) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table897 2024-11-11T04:23:29,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table897) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table414 2024-11-11T04:23:29,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table656 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table656) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table898 2024-11-11T04:23:29,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table898) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table415 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table657 2024-11-11T04:23:29,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table657) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table899 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table899) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table416 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table658 2024-11-11T04:23:29,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table658) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table417 2024-11-11T04:23:29,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table659 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table659) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table418 2024-11-11T04:23:29,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table419 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1113 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1355 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1114 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1356 2024-11-11T04:23:29,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1115 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1357 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1116 2024-11-11T04:23:29,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1358 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1117 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1359 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1118 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1119 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1182 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1183 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1184 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1185 2024-11-11T04:23:29,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1186 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1187 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table840 2024-11-11T04:23:29,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table840) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1188 2024-11-11T04:23:29,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table841 2024-11-11T04:23:29,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table841) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1189 2024-11-11T04:23:29,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table600 2024-11-11T04:23:29,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table600) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table842 2024-11-11T04:23:29,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table842) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table601 2024-11-11T04:23:29,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table601) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table843 2024-11-11T04:23:29,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table843) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table602 2024-11-11T04:23:29,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table602) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table844 2024-11-11T04:23:29,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table844) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table603 2024-11-11T04:23:29,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table603) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table845 2024-11-11T04:23:29,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table845) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table604 2024-11-11T04:23:29,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table604) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table846 2024-11-11T04:23:29,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table846) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table605 2024-11-11T04:23:29,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table605) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table847 2024-11-11T04:23:29,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table847) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table606 2024-11-11T04:23:29,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table606) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table848 2024-11-11T04:23:29,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table848) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1180 2024-11-11T04:23:29,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table607 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table607) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table849 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table849) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1181 2024-11-11T04:23:29,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table608 2024-11-11T04:23:29,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table608) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table609 2024-11-11T04:23:29,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table609) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1171 2024-11-11T04:23:29,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1172 2024-11-11T04:23:29,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1173 2024-11-11T04:23:29,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1174 2024-11-11T04:23:29,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1175 2024-11-11T04:23:29,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1176 2024-11-11T04:23:29,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1177 2024-11-11T04:23:29,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table830 2024-11-11T04:23:29,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table830) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1178 2024-11-11T04:23:29,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table831 2024-11-11T04:23:29,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table831) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table832 2024-11-11T04:23:29,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table832) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table833 2024-11-11T04:23:29,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table833) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table834 2024-11-11T04:23:29,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table834) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table835 2024-11-11T04:23:29,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table835) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
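The same log message also offers a second option: increase the relative multiplier of a specific cost function so its imbalance carries more weight. The sketch below assumes property keys (regionCountCost, tableSkewCost, moveCost) that are consistent with the default multipliers visible in the functionCost= line (500.0 / 35.0 / 7.0) but are not named in this log, so verify them against the HBase version in use before relying on them.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch under assumed property names: raise the relative multiplier of a
// specific cost function so its imbalance dominates the weighted average.
public class RaiseCostMultipliers {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f); // default appears as 500.0 in the log
    conf.setFloat("hbase.master.balancer.stochastic.tableSkewCost", 70f);     // default appears as 35.0 in the log
    conf.setFloat("hbase.master.balancer.stochastic.moveCost", 7f);           // default appears as 7.0 in the log
    System.out.println("regionCountCost = "
        + conf.get("hbase.master.balancer.stochastic.regionCountCost"));
  }
}
```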
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table836 2024-11-11T04:23:29,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table836) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table837 2024-11-11T04:23:29,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table837) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table838 2024-11-11T04:23:29,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table838) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1170 2024-11-11T04:23:29,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table839 2024-11-11T04:23:29,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table839) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1179 2024-11-11T04:23:29,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1160 2024-11-11T04:23:29,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1161 2024-11-11T04:23:29,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1162 2024-11-11T04:23:29,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1163 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table860 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table860) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1164 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table861 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table861) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1165 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table620 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table620) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table862 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table862) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1166 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table621 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table621) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table863 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table863) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1167 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table622 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table622) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table864 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table864) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table623 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table623) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table865 2024-11-11T04:23:29,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table865) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table624 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table624) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table866 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table866) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table625 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table625) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table867 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table867) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table626 2024-11-11T04:23:29,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table626) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table868 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table868) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table627 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table627) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table869 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table869) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table628 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table628) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table629 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table629) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1168 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1169 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1391 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1150 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1392 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1151 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1393 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1152 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1394 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1153 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1395 2024-11-11T04:23:29,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table850 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table850) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1154 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1396 2024-11-11T04:23:29,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table851 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table851) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1155 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1397 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table610 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table610) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table852 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table852) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1156 2024-11-11T04:23:29,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1398 2024-11-11T04:23:29,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table611 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table611) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table853 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table853) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table612 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table612) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table854 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table854) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table613 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table613) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table855 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table855) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table614 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table614) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table856 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table856) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table615 2024-11-11T04:23:29,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table615) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table857 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table857) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table616 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table616) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table858 2024-11-11T04:23:29,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table858) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table617 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table617) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table859 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table859) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table618 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table618) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1390 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table619 2024-11-11T04:23:29,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table619) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1157 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1399 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1158 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1159 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table240 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table482 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table241 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table483 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table242 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table484 2024-11-11T04:23:29,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table243 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table485 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table244 2024-11-11T04:23:29,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table486 2024-11-11T04:23:29,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table245 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table487 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table246 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table488 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table247 2024-11-11T04:23:29,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table489 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table248 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table249 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1308 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1309 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1300 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1301 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1302 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1303 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1304 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table490 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1305 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table491 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1306 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table250 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table492 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1307 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table471 2024-11-11T04:23:29,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table230 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table472 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table231 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table473 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table232 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table474 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table233 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table475 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table234 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table476 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table235 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table477 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table236 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table478 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table237 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table479 2024-11-11T04:23:29,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table238 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table239 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table480 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table481 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table262 2024-11-11T04:23:29,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table263 2024-11-11T04:23:29,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table264 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table265 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table266 2024-11-11T04:23:29,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table267 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table268 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table269 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table270 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table271 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table272 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table251 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table493 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table252 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table494 2024-11-11T04:23:29,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table253 2024-11-11T04:23:29,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table495 2024-11-11T04:23:29,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table254 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table496 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table255 2024-11-11T04:23:29,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table497 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table256 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table498 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table257 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table499 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table258 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table259 2024-11-11T04:23:29,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table260 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
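(Editor's note, not part of the captured log.) The BalancerClusterState lines in the records above describe the cluster topology the balancer sees: six servers, each on its own host, all on a single rack. The short Java sketch below (hypothetical class, not HBase code) just restates that mapping using the server names from the "Hosts are {...}" line and prints the same host/rack counts:

// Hypothetical sketch: the server -> host index mapping reported above,
// with all six hosts on one rack.
import java.util.LinkedHashMap;
import java.util.Map;

public final class ClusterTopologySketch {
    public static void main(String[] args) {
        Map<String, Integer> hostIndexByServer = new LinkedHashMap<>();
        hostIndexByServer.put("srv1189910397", 0);
        hostIndexByServer.put("srv1221393750", 1);
        hostIndexByServer.put("srv1745892569", 2);
        hostIndexByServer.put("srv1968334357", 3);
        hostIndexByServer.put("srv836767629", 4);
        hostIndexByServer.put("srv885747946", 5);
        int rackCount = 1; // the log reports racks are {rack=0}
        System.out.println("number of hosts=" + hostIndexByServer.size()
            + ", number of racks=" + rackCount);
        // Matches "Number of tables=1, number of hosts=6, number of racks=1".
    }
}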
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table261 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table680 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table680) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table681 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table681) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table440 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table682 2024-11-11T04:23:29,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table682) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table441 2024-11-11T04:23:29,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table683 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table683) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table200 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table442 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table684 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table684) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1340 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table201 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table443 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table685 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table685) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1341 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table202 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table444 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table686 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table686) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1100 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1342 2024-11-11T04:23:29,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table203 2024-11-11T04:23:29,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table445 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table687 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table687) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1101 2024-11-11T04:23:29,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1343 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table204 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table446 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table688 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table688) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table205 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table447 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table689 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table689) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table206 2024-11-11T04:23:29,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table448 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table207 2024-11-11T04:23:29,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table449 2024-11-11T04:23:29,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table208 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table209 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1102 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1344 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1103 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1345 2024-11-11T04:23:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1104 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1346 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1105 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1347 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1106 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1348 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1107 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1349 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1108 2024-11-11T04:23:29,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table690 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table690) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1109 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table670 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table670) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table671 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table671) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table430 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table672 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table672) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table431 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table673 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table673) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table432 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table674 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table674) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1330 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table433 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
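[editor's note] The BalancerClusterState lines repeated for every table describe the same fixed topology: six region servers (srv1189910397 .. srv885747946), each mapped to its own host index, all placed in the single rack "rack=0". The following standalone sketch just mirrors that mapping to make the "server i is on host i / rack 0" entries easier to read; the class and variable names are illustrative only and are not HBase APIs.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustration of the topology encoded by the BalancerClusterState log lines:
// six servers, one host per server, a single rack.
public final class ClusterTopologySketch {
    public static void main(String[] args) {
        String[] servers = {
            "srv1189910397", "srv1221393750", "srv1745892569",
            "srv1968334357", "srv836767629", "srv885747946"
        };
        Map<String, Integer> serverToHost = new LinkedHashMap<>();
        Map<String, Integer> serverToRack = new LinkedHashMap<>();
        for (int i = 0; i < servers.length; i++) {
            serverToHost.put(servers[i], i); // "server i is on host i"
            serverToRack.put(servers[i], 0); // "server i is on rack 0"
        }
        // Matches the log summary: number of hosts=6, number of racks=1
        System.out.println("hosts=" + serverToHost.size() + ", racks=1");
    }
}
```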
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table675 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table675) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1331 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table434 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table676 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table676) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1332 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table435 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table677 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table677) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table436 2024-11-11T04:23:29,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table678 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table678) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table437 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table679 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table679) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table438 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table439 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1333 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1334 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1335 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1336 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1337 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1338 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1339 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table460 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table461 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table220 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table462 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table221 2024-11-11T04:23:29,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table463 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table222 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table464 2024-11-11T04:23:29,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table223 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table465 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table224 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table466 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1320 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table225 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table467 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1321 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table226 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table468 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table227 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table469 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table228 2024-11-11T04:23:29,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table229 2024-11-11T04:23:29,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1322 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1323 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1324 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1325 2024-11-11T04:23:29,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1326 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1327 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1328 2024-11-11T04:23:29,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table470 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1329 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table691 2024-11-11T04:23:29,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table691) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table450 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table692 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table692) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table451 2024-11-11T04:23:29,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table693 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table693) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table210 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table452 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table694 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table694) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table211 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table453 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table695 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table695) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table212 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table454 2024-11-11T04:23:29,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table696 2024-11-11T04:23:29,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table696) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table213 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table455 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table697 2024-11-11T04:23:29,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table697) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table214 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table456 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table698 2024-11-11T04:23:29,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table698) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1310 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table215 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table457 2024-11-11T04:23:29,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table699 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table699) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table216 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table458 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table217 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table459 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table218 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table219 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1319 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1311 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1312 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1313 2024-11-11T04:23:29,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1314 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1315 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1316 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1317 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1318 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table196 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table197 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table198 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table199 2024-11-11T04:23:29,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table163 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table164 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table165 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table166 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table167 2024-11-11T04:23:29,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table168 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table169 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table170 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table171 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table172 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table173 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table394 2024-11-11T04:23:29,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table395 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table396 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table155 2024-11-11T04:23:29,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table397 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table156 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table398 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table157 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table399 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
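The BalancerClusterState lines repeated above ("server N is on host N", "server N is on rack 0", "Number of tables=1, number of hosts=6, number of racks=1") reflect a mapping of server names onto dense host and rack indices. Below is a hedged, stand-alone sketch of that kind of indexing, using the six server names and the single rack name "rack" taken from the "Hosts are {...}" line; it only reproduces the shape of the log output and is not the HBase BalancerClusterState implementation.

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class ClusterIndexSketch {
        public static void main(String[] args) {
            // Server names from the "Hosts are {...}" line above; in this test topology each
            // server runs on its own host, and every host sits in the single rack named "rack".
            String[] servers = {"srv1189910397", "srv1221393750", "srv1745892569",
                                "srv1968334357", "srv836767629", "srv885747946"};
            Map<String, Integer> hostIndex = new LinkedHashMap<>();
            Map<String, Integer> rackIndex = new LinkedHashMap<>();
            for (int i = 0; i < servers.length; i++) {
                String host = servers[i];  // one host per server here
                String rack = "rack";      // single shared rack, as in the log
                if (!hostIndex.containsKey(host)) {
                    hostIndex.put(host, hostIndex.size());
                }
                if (!rackIndex.containsKey(rack)) {
                    rackIndex.put(rack, rackIndex.size());
                }
                System.out.printf("server %d is on host %d%n", i, hostIndex.get(host));
                System.out.printf("server %d is on rack %d%n", i, rackIndex.get(rack));
            }
            System.out.printf("Number of hosts=%d, number of racks=%d%n",
                    hostIndex.size(), rackIndex.size());
        }
    }

Because only one rack name ever appears, every server resolves to rack 0, which is why the rack-locality and region-replica-rack costs contribute nothing to the decisions above.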
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table158 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table159 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table160 2024-11-11T04:23:29,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table161 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table162 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table185 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table186 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table187 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table188 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table189 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table190 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table191 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table192 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table193 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table194 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table195 2024-11-11T04:23:29,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table174 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table175 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table176 2024-11-11T04:23:29,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table177 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table178 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table179 2024-11-11T04:23:29,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table180 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table181 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table182 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table183 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table184 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table800 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table800) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table801 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table801) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table802 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table802) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table803 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table803) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table804 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table804) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table805 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table805) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table806 2024-11-11T04:23:29,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table806) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table807 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table807) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table808 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table808) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table809 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table809) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table820 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table820) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table821 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table821) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table822 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table822) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table823 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table823) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table824 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table824) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table825 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table825) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table826 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table826) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table827 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table827) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table828 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table828) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table829 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table829) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1193 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1194 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1195 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1196 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1197 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1198 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1199 2024-11-11T04:23:29,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table810 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table810) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table811 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table811) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table812 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table812) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table813 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table813) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table814 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table814) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1190 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table815 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table815) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1191 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table816 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table816) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1192 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table817 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table817) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table818 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table818) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table819 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table819) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1260 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1261 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table760 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table760) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1020 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1020) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1262 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table761 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table761) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1021 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1021) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1263 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table520 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table520) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table762 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table762) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1022 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1022) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1264 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table521 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table521) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table763 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table763) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1023 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1023) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1265 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table522 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table522) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table764 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table764) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1024 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1024) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1266 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table523 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table523) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table765 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table765) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table524 2024-11-11T04:23:29,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table524) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table766 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table766) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table525 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table525) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table767 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table767) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table526 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table526) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table768 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table768) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table527 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table527) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table769 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table769) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table528 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table528) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table529 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table529) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-11T04:23:29,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1025 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1025) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1267 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1026 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1026) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1268 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1027 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1027) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1269 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1028 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1028) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1029 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1029) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1490 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1491 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table990 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table990) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1250 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1492 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table991 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table991) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1251 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1493 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table750 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table750) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table992 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table992) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1010 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1010) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1252 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1494 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table751 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table751) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table993 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table993) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1011 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1011) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1253 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1495 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table510 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table510) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table752 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table752) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table994 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table994) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1012 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1012) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1254 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1496 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table511 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table511) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table753 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table753) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table995 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table995) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1013 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1013) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1255 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1497 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table512 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table512) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table754 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table754) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table996 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table996) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table513 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table513) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table755 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table755) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table997 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table997) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table514 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table514) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table756 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table756) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table998 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table998) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table515 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table515) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table757 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table757) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table999 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table999) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table516 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table516) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table758 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table758) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table517 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table517) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table759 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table759) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table518 2024-11-11T04:23:29,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table518) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table519 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table519) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-11T04:23:29,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1014 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1014) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1256 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1498 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1015 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1015) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1257 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1499 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-11T04:23:29,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1016 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1016) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1258 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1017 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1017) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1259 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
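The repeated BalancerClusterState lines record how the six servers are indexed onto hosts and racks before costing: in this test every server is its own host and all hosts share the single rack named "rack", hence host indices 0 through 5 but only rack 0. A small sketch of building such an index, assuming a plain map-based assignment rather than the real BalancerClusterState code (the server names are copied from the log, everything else is illustrative):

    // Sketch only: reproduces the "server N is on host N" / "server N is on rack 0"
    // bookkeeping with simple maps; not the actual BalancerClusterState implementation.
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    final class ClusterIndexSketch {
        public static void main(String[] args) {
            // Server names as they appear in the "Hosts are {...}" entries above.
            List<String> servers = List.of(
                "srv1189910397", "srv1221393750", "srv1745892569",
                "srv1968334357", "srv836767629", "srv885747946");

            Map<String, Integer> hostIndex = new LinkedHashMap<>();
            Map<String, Integer> rackIndex = new LinkedHashMap<>();

            for (int serverIdx = 0; serverIdx < servers.size(); serverIdx++) {
                String server = servers.get(serverIdx);
                // One host per server in this test, so each server gets a fresh host index.
                hostIndex.putIfAbsent(server, hostIndex.size());
                // A single rack named "rack" holds every host, so the rack index stays 0.
                rackIndex.putIfAbsent("rack", rackIndex.size());
                System.out.println("server " + serverIdx + " is on host " + hostIndex.get(server)
                    + " and rack " + rackIndex.get("rack"));
            }
            System.out.println("Number of hosts=" + hostIndex.size()
                + ", number of racks=" + rackIndex.size());
        }
    }

Because there is only one rack, RackLocalityCostFunction and the replica rack checks have nothing to separate, which is consistent with the zero imbalances reported above.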
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1018 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1018) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1019 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1019) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table780 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table780) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1480 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table781 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table781) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1481 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table540 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table540) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table782 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table782) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1240 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1482 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table541 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table541) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table783 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table783) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1241 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1483 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table300 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table542 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table542) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table784 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table784) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1000 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1000) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1242 2024-11-11T04:23:29,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1484 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table301 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table543 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table543) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table785 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table785) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1001 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1001) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1243 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1485 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table302 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table544 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table544) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table786 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table786) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1002 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1002) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1244 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1486 2024-11-11T04:23:29,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table303 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table545 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table545) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table787 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table787) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table304 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table546 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table546) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table788 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table788) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table305 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table547 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table547) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table789 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table789) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table306 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table548 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table548) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table307 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table549 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table549) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table308 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table309 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1003 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1003) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1245 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1487 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1004 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1004) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1246 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1488 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
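Each block also dumps the cluster topology before the decision: BalancerClusterState assigns every RegionServer a dense index, and because this test cluster has one server per host and a single rack, "server i is on host i" and every server lands on rack 0. A small Java sketch of that indexing, assuming a plain map keyed by server name (hypothetical names, not the BalancerClusterState implementation; the log prints its map in hash order, which differs from the insertion order used here):

// Sketch of the "Hosts are {...} racks are {rack=0}" bookkeeping: one index per server,
// one host per server, one shared rack. Hypothetical names only.
import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterIndexSketch {
    public static void main(String[] args) {
        String[] servers = {"srv1189910397", "srv1221393750", "srv1745892569",
                            "srv1968334357", "srv836767629", "srv885747946"};
        Map<String, Integer> hostIndexByServer = new LinkedHashMap<>();
        for (String server : servers) {
            hostIndexByServer.putIfAbsent(server, hostIndexByServer.size());
        }
        System.out.println("Hosts are " + hostIndexByServer + " racks are {rack=0}");
        for (int i = 0; i < servers.length; i++) {
            // One server per host and a single rack, matching the DEBUG/INFO lines above.
            System.out.println("server " + i + " is on host " + i + ", rack 0");
        }
        System.out.println("Number of tables=1, number of hosts=" + servers.length + ", number of racks=1");
    }
}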
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1005 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1005) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1247 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1489 2024-11-11T04:23:29,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1006 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1006) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1248 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1007 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1007) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1249 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1008 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1008) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1009 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1009) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table770 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table770) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1470 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table771 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table771) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1471 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table530 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table530) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table772 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table772) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1230 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1472 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table531 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table531) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table773 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table773) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1231 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1473 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table532 2024-11-11T04:23:29,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table532) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table774 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table774) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1232 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1474 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table533 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table533) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table775 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table775) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1233 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1475 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table534 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table534) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table776 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table776) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table535 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table535) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table777 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table777) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table536 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table536) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table778 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table778) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table537 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table537) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table779 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table779) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table538 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table538) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table539 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table539) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1234 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1476 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1235 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1477 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1236 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1478 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1237 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1479 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1238 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1239 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1061 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1061) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1062 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1062) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1063 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1063) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1064 2024-11-11T04:23:29,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1064) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1065 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1065) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table960 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table960) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1066 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1066) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table961 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table961) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1067 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1067) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table720 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table720) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table962 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table962) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1068 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1068) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table721 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table721) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table963 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table963) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table722 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table722) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table964 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table964) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table723 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table723) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table965 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table965) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table724 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table724) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table966 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table966) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table725 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table725) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table967 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table967) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table726 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table726) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table968 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table968) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table727 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table727) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table969 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table969) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table728 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table728) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1060 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1060) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table729 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table729) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1069 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1069) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1050 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1050) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1292 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1051 2024-11-11T04:23:29,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1051) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1293 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1052 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1052) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1294 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1053 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1053) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1295 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1054 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1054) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1296 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1055 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1055) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1297 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table950 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table950) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1056 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1056) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1298 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table951 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table951) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1057 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1057) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1299 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table710 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table710) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table952 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table952) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table711 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table711) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table953 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table953) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table712 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table712) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table954 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table954) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table713 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table713) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table955 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table955) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table714 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table714) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table956 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table956) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table715 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table715) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table957 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table957) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table716 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table716) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table958 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table958) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1290 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table717 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table717) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table959 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table959) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1291 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table718 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table718) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table719 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table719) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1058 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1058) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1059 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1059) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1281 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1040 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1040) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1282 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1041 2024-11-11T04:23:29,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1041) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1283 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table980 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table980) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1042 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1042) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1284 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table981 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table981) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1043 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1043) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1285 2024-11-11T04:23:29,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table740 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table740) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table982 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table982) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1044 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1044) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1286 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table741 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table741) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table983 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table983) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1045 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1045) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1287 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table500 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table500) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table742 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table742) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table984 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table984) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1046 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1046) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1288 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table501 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table501) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table743 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table743) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table985 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table985) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table502 2024-11-11T04:23:29,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table502) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table744 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table744) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table986 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table986) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table503 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table503) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table745 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table745) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table987 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table987) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table504 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table504) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table746 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table746) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table988 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table988) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table505 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table505) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table747 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table747) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table989 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table989) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table506 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table506) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table748 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table748) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:29,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table507 2024-11-11T04:23:29,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table507) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table749 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table749) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table508 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table508) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1280 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table509 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table509) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1047 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1047) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1289 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1048 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1048) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1049 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1049) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1270 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1271 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1030 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1030) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1272 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1031 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1031) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1273 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table970 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table970) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1032 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1032) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1274 2024-11-11T04:23:30,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table971 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table971) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1033 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1033) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1275 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table730 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table730) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table972 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table972) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1034 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1034) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1276 2024-11-11T04:23:30,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table731 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table731) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table973 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table973) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1035 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1035) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1277 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table732 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table732) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table974 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table974) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table733 2024-11-11T04:23:30,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table733) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table975 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table975) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table734 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table734) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table976 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table976) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table735 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table735) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table977 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table977) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table736 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table736) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table978 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table978) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table737 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table737) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table979 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table979) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table738 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table738) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table739 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table739) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1036 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1036) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1278 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1037 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1037) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1279 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1038 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1038) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1039 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1039) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table361 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table362 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table363 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table364 2024-11-11T04:23:30,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table365 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table366 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table367 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table368 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1420 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table369 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1429 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1421 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1422 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1423 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1424 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1425 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1426 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table370 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1427 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table371 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1428 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table350 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table592 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table592) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table351 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table593 2024-11-11T04:23:30,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table593) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table352 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table594 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table594) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table353 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table595 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table595) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table354 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table596 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table596) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table355 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table597 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table597) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table356 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table598 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table598) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table357 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table599 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table599) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table358 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table359 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1418 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1419 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1410 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1411 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1412 2024-11-11T04:23:30,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1413 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1414 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1415 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1416 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table360 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1417 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table383 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table384 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table385 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table386 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table387 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table388 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-11T04:23:30,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table389 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-11T04:23:30,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1407 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1408 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1409 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1400 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1401 2024-11-11T04:23:30,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1402 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table390 2024-11-11T04:23:30,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1403 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table391 2024-11-11T04:23:30,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1404 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-11T04:23:30,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table392 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1405 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:30,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table393 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1406 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table372 2024-11-11T04:23:30,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table373 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table374 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table375 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table376 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table377 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table378 2024-11-11T04:23:30,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-11T04:23:30,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table379 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table380 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table381 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table382 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table560 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table560) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table561 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table561) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table320 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table562 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table562) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1460 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table321 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table563 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table563) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1461 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table322 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table564 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table564) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1220 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1462 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table323 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table565 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table565) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1221 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1463 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table324 2024-11-11T04:23:30,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table566 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table566) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1222 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1464 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table325 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table567 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table567) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table326 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table568 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table568) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table327 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table569 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table569) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table328 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table329 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1223 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1465 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1224 2024-11-11T04:23:30,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1466 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1225 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1467 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-11T04:23:30,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1226 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1468 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1227 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1469 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1228 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1229 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-11T04:23:30,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table790 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table790) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-11T04:23:30,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table791 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table791) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-11T04:23:30,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table550 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table550) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table792 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table792) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table551 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table551) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table793 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table793) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table310 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table552 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table552) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table794 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table794) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1450 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table311 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table553 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table553) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table795 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table795) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1451 2024-11-11T04:23:30,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table312 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table554 2024-11-11T04:23:30,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table554) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table796 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table796) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1210 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1452 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table313 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table555 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table555) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table797 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table797) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1211 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1453 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table314 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table556 2024-11-11T04:23:30,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table556) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table798 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table798) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table315 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table557 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table557) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table799 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table799) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table316 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table558 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table558) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table317 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table559 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table559) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table318 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table319 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1212 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1454 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1213 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1455 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1214 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1456 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-11T04:23:30,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1215 2024-11-11T04:23:30,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1457 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1216 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1458 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1217 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1459 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1218 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1219 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table581 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table581) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table340 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table582 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table582) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table341 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table583 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table583) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table342 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table584 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table584) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table343 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table585 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table585) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table344 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table586 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table586) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1440 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table345 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table587 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table587) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1441 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-11T04:23:30,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table346 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table588 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table588) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1200 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1442 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table347 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table589 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table589) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table348 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-11T04:23:30,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table349 2024-11-11T04:23:30,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1209 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1201 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1443 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1202 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1444 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1203 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1445 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-11T04:23:30,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1204 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1446 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1205 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1447 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1206 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1448 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table590 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table590) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1207 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1449 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table591 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table591) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1208 2024-11-11T04:23:30,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table570 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table570) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table571 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table571) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table330 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table572 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table572) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table331 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table573 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table573) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table332 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table574 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table574) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table333 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table575 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table575) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table334 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table576 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table576) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1430 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table335 2024-11-11T04:23:30,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table577 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table577) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1431 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table336 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table578 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table578) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table337 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table579 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table579) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table338 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table339 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1432 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1433 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1434 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1435 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1436 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-11T04:23:30,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1437 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1438 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table580 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table580) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1439 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table284 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table285 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table286 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table287 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table288 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table289 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table290 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table291 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table292 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table293 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table294 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table273 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table274 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table275 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table276 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table277 2024-11-11T04:23:30,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table278 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table279 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table280 2024-11-11T04:23:30,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table281 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table282 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table283 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table295 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table296 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table297 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table298 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table299 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table920 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table920) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table921 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table921) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table922 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table922) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table923 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table923) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table924 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table924) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table925 2024-11-11T04:23:30,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table925) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table926 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table926) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table927 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table927) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table928 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table928) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table929 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table929) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1094 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1094) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1095 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1095) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1096 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1096) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1097 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1097) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1098 2024-11-11T04:23:30,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1098) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1099 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1099) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table910 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table910) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table911 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table911) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table912 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table912) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1090 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1090) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table913 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table913) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1091 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1091) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table914 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table914) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1092 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1092) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table915 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table915) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1093 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1093) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table916 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table916) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table917 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table917) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table918 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table918) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table919 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table919) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1083 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1083) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1084 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1084) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1085 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1085) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1086 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1086) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1087 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1087) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1088 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1088) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1089 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1089) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table940 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table940) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table941 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table941) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table700 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table700) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table942 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table942) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table701 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table701) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table943 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table943) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table702 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table702) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table944 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table944) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table703 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table703) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table945 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table945) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table704 2024-11-11T04:23:30,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table704) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table946 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table946) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1080 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1080) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table705 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table705) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table947 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table947) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1081 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1081) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table706 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table706) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table948 2024-11-11T04:23:30,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table948) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1082 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1082) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table707 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table707) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table949 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table949) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table708 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table708) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table709 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table709) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1072 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1072) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1073 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1073) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1074 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1074) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1075 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1075) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1076 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1076) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1077 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1077) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1078 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1078) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1079 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1079) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table930 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table930) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table931 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table931) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table932 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table932) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table933 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table933) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table934 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table934) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table935 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table935) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table936 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table936) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1070 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1070) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table937 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table937) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1071 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1071) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table938 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table938) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table939 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table939) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table900 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table900) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table901 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table901) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table902 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table902) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table903 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table903) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table904 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table904) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table905 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table905) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table906 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table906) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table907 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table907) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table908 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table908) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table909 2024-11-11T04:23:30,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1221393750=1, srv1745892569=2, srv836767629=4, srv1968334357=3, srv885747946=5, srv1189910397=0} racks are {rack=0} 2024-11-11T04:23:30,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table909) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1380 2024-11-11T04:23:30,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1381 2024-11-11T04:23:30,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table880 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table880) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1140 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1382 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table881 2024-11-11T04:23:30,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table881) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1141 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1383 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table640 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table640) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table882 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table882) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1142 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1384 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table641 2024-11-11T04:23:30,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table641) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table883 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table883) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1143 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1385 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table400 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table642 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table642) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table884 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table884) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1144 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1386 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table401 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table643 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table643) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table885 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table885) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1145 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1387 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table402 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table644 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table644) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table886 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table886) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table403 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table645 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table645) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table887 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table887) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table404 2024-11-11T04:23:30,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table646 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table646) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table888 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table888) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table405 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table647 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table647) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table889 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table889) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table406 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table648 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table648) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table407 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table649 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table649) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table408 2024-11-11T04:23:30,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table409 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1146 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1388 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1147 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1389 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1148 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1149 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1370 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1371 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table870 2024-11-11T04:23:30,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table870) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1130 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1372 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table871 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table871) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1131 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1373 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table630 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table630) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table872 2024-11-11T04:23:30,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table872) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1132 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1374 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table631 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table631) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table873 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table873) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1133 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1375 2024-11-11T04:23:30,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table632 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table632) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table874 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table874) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1134 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1376 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table633 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table633) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table875 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table875) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table634 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table634) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table876 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table876) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table635 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table635) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table877 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table877) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table636 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table636) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table878 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table878) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table637 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table637) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table879 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table879) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table638 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table638) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table639 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table639) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1135 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1377 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1136 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1378 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1137 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1379 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1138 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1139 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table660 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table660) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1360 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table661 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table661) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1361 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table420 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table662 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table662) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1120 2024-11-11T04:23:30,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1362 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table421 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table663 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table663) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1121 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1363 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table422 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table664 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table664) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1122 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1364 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table423 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table665 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table665) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1123 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1365 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table424 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table666 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table666) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table425 2024-11-11T04:23:30,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table667 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table667) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table426 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table668 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table668) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table427 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table669 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table669) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table428 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table429 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1124 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1366 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1125 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1367 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1126 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1368 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1127 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1369 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1128 2024-11-11T04:23:30,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1129 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table890 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table890) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table891 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table891) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table650 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table650) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table892 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table892) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1350 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table651 2024-11-11T04:23:30,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table651) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table893 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table893) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1351 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table410 2024-11-11T04:23:30,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table652 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table652) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table894 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table894) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1110 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1352 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table411 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table653 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table653) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table895 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table895) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1111 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1353 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table412 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table654 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table654) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table896 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table896) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1112 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1354 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table413 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table655 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table655) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table897 2024-11-11T04:23:30,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table897) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table414 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table656 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table656) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table898 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table898) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table415 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table657 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table657) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table899 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table899) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table416 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table658 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table658) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table417 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table659 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table659) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table418 2024-11-11T04:23:30,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table419 2024-11-11T04:23:30,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1113 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1355 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1114 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1356 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1115 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1357 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1116 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1358 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1117 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1359 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1118 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1119 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1182 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1183 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1184 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1185 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1186 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1187 2024-11-11T04:23:30,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table840 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table840) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1188 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table841 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table841) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1189 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table600 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table600) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table842 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table842) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table601 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table601) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table843 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table843) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table602 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table602) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table844 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table844) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table603 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table603) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table845 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table845) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table604 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table604) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table846 2024-11-11T04:23:30,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table846) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table605 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table605) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table847 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table847) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table606 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table606) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table848 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table848) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1180 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table607 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table607) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table849 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table849) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1181 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table608 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table608) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table609 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table609) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1171 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1172 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1173 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1174 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1175 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1176 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1177 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table830 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table830) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1178 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table831 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table831) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table832 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table832) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table833 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table833) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table834 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table834) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table835 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table835) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table836 2024-11-11T04:23:30,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table836) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table837 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table837) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table838 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table838) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1170 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table839 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table839) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1179 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1160 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1161 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1162 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1163 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table860 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table860) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1164 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table861 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table861) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1165 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table620 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table620) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table862 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table862) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1166 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table621 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table621) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
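Note on the BalancerClusterState records ("Hosts are {...} racks are {rack=0}", "server N is on host N", "server N is on rack 0", "Number of tables=1, number of hosts=6, number of racks=1"): they describe plain index bookkeeping, where each distinct server name gets a host index and, with a single rack, every host maps to rack 0. The sketch below shows one way such a mapping could be built; the class and variable names are illustrative and are not HBase API.

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    // Illustrative bookkeeping for the "server X is on host Y / rack Z" lines above:
    // six distinct host names, so host indexes line up 1:1 with server indexes,
    // and a single rack means every server lands on rack 0.
    public class ClusterIndexSketch {
        public static void main(String[] args) {
            List<String> servers = List.of(
                "srv1122117808", "srv1182882320", "srv1229847721",
                "srv1230816291", "srv1543314833", "srv2146099471");

            Map<String, Integer> hostIndex = new LinkedHashMap<>();
            int[] serverToHost = new int[servers.size()];
            int[] serverToRack = new int[servers.size()];

            for (int i = 0; i < servers.size(); i++) {
                String host = servers.get(i);
                Integer idx = hostIndex.get(host);
                if (idx == null) {
                    idx = hostIndex.size();   // next unused host index
                    hostIndex.put(host, idx);
                }
                serverToHost[i] = idx;
                serverToRack[i] = 0;          // single rack, as in "racks are {rack=0}"
                System.out.println("server " + i + " is on host " + serverToHost[i]
                    + ", rack " + serverToRack[i]);
            }
            System.out.println("hosts=" + hostIndex.size() + ", racks=1");
        }
    }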
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table863 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table863) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1167 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table622 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table622) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table864 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table864) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table623 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table623) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table865 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table865) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table624 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table624) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table866 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table866) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table625 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table625) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table867 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table867) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table626 2024-11-11T04:23:30,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table626) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table868 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table868) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table627 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table627) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table869 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table869) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table628 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table628) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table629 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table629) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1168 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1169 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1391 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1150 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1392 2024-11-11T04:23:30,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1151 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1393 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1152 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1394 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1153 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1395 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table850 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table850) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1154 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1396 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table851 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table851) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1155 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1397 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table610 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table610) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table852 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table852) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1156 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1398 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table611 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table611) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table853 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table853) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table612 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table612) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table854 2024-11-11T04:23:30,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table854) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table613 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table613) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table855 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table855) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table614 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table614) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table856 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table856) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table615 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table615) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table857 2024-11-11T04:23:30,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table857) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table616 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table616) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table858 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table858) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table617 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table617) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table859 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table859) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table618 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table618) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1390 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table619 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table619) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1157 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1399 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1158 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1159 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table240 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table482 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table241 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table483 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table242 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table484 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table243 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table485 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table244 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table486 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table245 2024-11-11T04:23:30,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table487 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table246 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table488 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table247 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table489 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table248 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table249 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1308 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1309 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1300 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1301 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1302 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1303 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1304 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table490 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1305 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table491 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1306 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table250 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table492 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1307 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table471 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table230 2024-11-11T04:23:30,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table472 2024-11-11T04:23:30,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table231 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table473 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table232 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table474 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table233 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table475 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table234 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table476 2024-11-11T04:23:30,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table235 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table477 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table236 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table478 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table237 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table479 2024-11-11T04:23:30,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table238 2024-11-11T04:23:30,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table239 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table480 2024-11-11T04:23:30,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table481 2024-11-11T04:23:30,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table262 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table263 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table264 2024-11-11T04:23:30,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table265 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table266 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table267 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table268 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table269 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table270 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table271 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table272 2024-11-11T04:23:30,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table251 2024-11-11T04:23:30,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table493 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table252 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table494 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table253 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table495 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table254 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table496 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table255 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table497 2024-11-11T04:23:30,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table256 2024-11-11T04:23:30,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table498 2024-11-11T04:23:30,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table257 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table499 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table258 2024-11-11T04:23:30,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table259 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table260 2024-11-11T04:23:30,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table261 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table680 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table680) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table681 2024-11-11T04:23:30,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table681) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table440 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table682 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table682) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table441 2024-11-11T04:23:30,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table683 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table683) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table200 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table442 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table684 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table684) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1340 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table201 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table443 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table685 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table685) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1341 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table202 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table444 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table686 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table686) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1100 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1342 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table203 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table445 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table687 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table687) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1101 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1343 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table204 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table446 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table688 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table688) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table205 2024-11-11T04:23:30,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table447 2024-11-11T04:23:30,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table689 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table689) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table206 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table448 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table207 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table449 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table208 2024-11-11T04:23:30,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table209 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1102 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1344 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1103 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1345 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1104 2024-11-11T04:23:30,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1346 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1105 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1347 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1106 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1348 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1107 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1349 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1108 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table690 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table690) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1109 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table670 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table670) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table671 2024-11-11T04:23:30,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table671) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table430 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table672 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table672) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table431 2024-11-11T04:23:30,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table673 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table673) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table432 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table674 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table674) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1330 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table433 2024-11-11T04:23:30,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
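The BalancerClusterState lines repeat the same topology for every table: six servers, each on its own host, all on a single rack. A small Java sketch of that server-to-host and server-to-rack bookkeeping (the array names are illustrative placeholders, not HBase's internal fields):

    import java.util.Arrays;

    // Sketch of the index maps implied by the "server N is on host N" and
    // "server N is on rack 0" lines above. Placeholder names, not HBase internals.
    public class ClusterTopologySketch {
        public static void main(String[] args) {
            int numServers = 6;                      // srv1122117808 .. srv2146099471
            int[] serverToHost = new int[numServers];
            int[] serverToRack = new int[numServers];
            for (int server = 0; server < numServers; server++) {
                serverToHost[server] = server;       // each server on its own host
                serverToRack[server] = 0;            // single rack: "racks are {rack=0}"
            }
            System.out.println("serverToHost = " + Arrays.toString(serverToHost));
            System.out.println("serverToRack = " + Arrays.toString(serverToRack));
            // With only one rack there is nothing for rack-level placement to improve,
            // which is consistent with RackLocalityCostFunction's imbalance=0.0 above.
        }
    }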
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table675 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table675) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1331 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table434 2024-11-11T04:23:30,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table676 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table676) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1332 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table435 2024-11-11T04:23:30,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table677 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table677) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table436 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table678 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table678) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table437 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table679 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table679) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table438 2024-11-11T04:23:30,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table439 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1333 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1334 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1335 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1336 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1337 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1338 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1339 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table460 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table461 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table220 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table462 2024-11-11T04:23:30,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table221 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table463 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table222 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table464 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table223 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table465 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table224 2024-11-11T04:23:30,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table466 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1320 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table225 2024-11-11T04:23:30,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table467 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1321 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table226 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table468 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table227 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table469 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table228 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table229 2024-11-11T04:23:30,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1322 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1323 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1324 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1325 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1326 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1327 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1328 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table470 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1329 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table691 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table691) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table450 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table692 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table692) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table451 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table693 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table693) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table210 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table452 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table694 2024-11-11T04:23:30,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table694) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table211 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table453 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table695 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table695) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table212 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table454 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table696 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table696) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table213 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table455 2024-11-11T04:23:30,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table697 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table697) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table214 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table456 2024-11-11T04:23:30,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table698 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table698) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1310 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table215 2024-11-11T04:23:30,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table457 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table699 2024-11-11T04:23:30,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table699) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table216 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table458 2024-11-11T04:23:30,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table217 2024-11-11T04:23:30,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table459 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table218 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table219 2024-11-11T04:23:30,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1319 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1311 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1312 2024-11-11T04:23:30,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1313 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1314 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1315 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1316 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1317 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1318 2024-11-11T04:23:30,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table196 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table197 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table198 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table199 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table163 2024-11-11T04:23:30,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table164 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table165 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table166 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table167 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table168 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table169 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table170 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table171 2024-11-11T04:23:30,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table172 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table173 2024-11-11T04:23:30,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table394 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table395 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-11T04:23:30,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table396 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table155 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table397 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table156 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table398 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table157 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table399 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table158 2024-11-11T04:23:30,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table159 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table160 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table161 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table162 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table185 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table186 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table187 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table188 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table189 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table190 2024-11-11T04:23:30,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table191 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table192 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table193 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table194 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table195 2024-11-11T04:23:30,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table174 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table175 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table176 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table177 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table178 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table179 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table180 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table181 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table182 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table183 2024-11-11T04:23:30,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table184 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table800 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table800) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table801 2024-11-11T04:23:30,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table801) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table802 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table802) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table803 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table803) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table804 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table804) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table805 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table805) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table806 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table806) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table807 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table807) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table808 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table808) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table809 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table809) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table820 2024-11-11T04:23:30,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table820) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table821 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table821) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table822 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table822) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table823 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table823) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table824 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table824) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table825 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table825) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table826 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table826) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table827 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table827) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table828 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table828) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table829 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table829) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1193 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1194 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1195 2024-11-11T04:23:30,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1196 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1197 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1198 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1199 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table810 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table810) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table811 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table811) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table812 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table812) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table813 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table813) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table814 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table814) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1190 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table815 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table815) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1191 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table816 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table816) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1192 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table817 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table817) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table818 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table818) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table819 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table819) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1260 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1261 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table760 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table760) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1020 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1020) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1262 2024-11-11T04:23:30,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table761 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table761) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1021 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1021) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1263 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table520 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table520) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table762 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table762) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1022 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1022) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1264 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table521 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table521) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table763 2024-11-11T04:23:30,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table763) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1023 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1023) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1265 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table522 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table522) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table764 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table764) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1024 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1024) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1266 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table523 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table523) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table765 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table765) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table524 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table524) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table766 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table766) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table525 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table525) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table767 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table767) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table526 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table526) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table768 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table768) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table527 2024-11-11T04:23:30,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table527) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table769 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table769) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table528 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table528) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table529 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table529) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-11T04:23:30,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1025 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1025) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1267 2024-11-11T04:23:30,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1026 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1026) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1268 2024-11-11T04:23:30,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1027 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1027) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1269 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1028 2024-11-11T04:23:30,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1028) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1029 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1029) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-11T04:23:30,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
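The BalancerClusterState lines in each entry describe one fixed topology: six region servers, each mapped to its own host, all placed in a single rack, so rack-level cost functions have nothing to improve. A small stand-alone sketch of that indexing follows; the server names and index assignments are copied from the "Hosts are {...}" lines, while the class name and data structures are hypothetical and only mirror what the log reports.

    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class ClusterStateSketch {
        public static void main(String[] args) {
            // Server-to-host mapping exactly as the log reports it (srv1122117808=0, ...).
            String[] servers = { "srv1122117808", "srv1182882320", "srv1229847721",
                                 "srv1230816291", "srv1543314833", "srv2146099471" };
            Map<String, Integer> hostIndexByServer = new LinkedHashMap<>();
            for (int i = 0; i < servers.length; i++) {
                hostIndexByServer.put(servers[i], i);      // "server i is on host i"
            }
            int[] rackForServer = new int[servers.length]; // all zero: "server i is on rack 0"
            long racks = Arrays.stream(rackForServer).distinct().count();
            System.out.println("Number of tables=1, number of hosts=" + servers.length
                + ", number of racks=" + racks);
        }
    }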
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1490 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1491 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table990 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table990) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1250 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1492 2024-11-11T04:23:30,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table991 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table991) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1251 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1493 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table750 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table750) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table992 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table992) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1010 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1010) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1252 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1494 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table751 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table751) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table993 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table993) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1011 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1011) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1253 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1495 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table510 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table510) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table752 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table752) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table994 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table994) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1012 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1012) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1254 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1496 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table511 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table511) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table753 2024-11-11T04:23:30,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table753) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table995 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table995) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1013 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1013) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1255 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1497 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table512 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table512) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table754 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table754) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table996 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table996) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table513 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table513) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table755 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table755) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table997 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table997) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table514 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table514) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table756 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table756) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table998 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table998) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table515 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table515) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table757 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table757) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table999 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table999) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table516 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table516) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table758 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table758) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table517 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table517) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table759 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table759) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table518 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table518) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table519 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table519) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1014 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1014) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1256 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1498 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1015 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1015) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1257 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1499 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-11T04:23:30,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1016 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1016) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1258 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-11T04:23:30,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1017 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1017) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1259 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-11T04:23:30,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1018 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1018) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1019 2024-11-11T04:23:30,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1019) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-11T04:23:30,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table780 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table780) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1480 2024-11-11T04:23:30,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table781 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table781) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1481 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table540 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table540) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table782 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table782) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1240 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1482 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table541 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table541) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table783 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table783) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1241 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1483 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table300 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table542 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table542) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table784 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table784) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1000 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1000) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1242 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1484 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table301 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table543 2024-11-11T04:23:30,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table543) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table785 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table785) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1001 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1001) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1243 2024-11-11T04:23:30,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1485 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table302 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table544 2024-11-11T04:23:30,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table544) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table786 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table786) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1002 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1002) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1244 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1486 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table303 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table545 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table545) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table787 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table787) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table304 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table546 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table546) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table788 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table788) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table305 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table547 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table547) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table789 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table789) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table306 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table548 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table548) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table307 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table549 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table549) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table308 2024-11-11T04:23:30,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table309 2024-11-11T04:23:30,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1003 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1003) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1245 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1487 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-11T04:23:30,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1004 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1004) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1246 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1488 2024-11-11T04:23:30,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1005 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1005) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1247 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1489 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-11T04:23:30,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1006 2024-11-11T04:23:30,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1006) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1248 2024-11-11T04:23:30,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-11T04:23:30,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1007 2024-11-11T04:23:30,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1007) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1249 2024-11-11T04:23:30,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-11T04:23:30,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,479 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,479 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,479 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1008 2024-11-11T04:23:30,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,479 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,479 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1008) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,479 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-11T04:23:30,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1009 2024-11-11T04:23:30,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1009) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-11T04:23:30,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-11T04:23:30,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-11T04:23:30,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table770 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table770) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1470 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table771 2024-11-11T04:23:30,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table771) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1471 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table530 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table530) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table772 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table772) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1230 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1472 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table531 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table531) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table773 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table773) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1231 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1473 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table532 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table532) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table774 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table774) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1232 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1474 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table533 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table533) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table775 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table775) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1233 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1475 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table534 2024-11-11T04:23:30,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table534) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table776 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table776) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table535 2024-11-11T04:23:30,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table535) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table777 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table777) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table536 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table536) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table778 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table778) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table537 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table537) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table779 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table779) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table538 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table538) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table539 2024-11-11T04:23:30,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table539) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-11T04:23:30,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1234 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1476 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1235 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1477 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1236 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1478 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-11T04:23:30,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1237 2024-11-11T04:23:30,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1479 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1238 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1239 2024-11-11T04:23:30,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-11T04:23:30,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-11T04:23:30,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1061 2024-11-11T04:23:30,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1061) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1062 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1062) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1063 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1063) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1064 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1064) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1065 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1065) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table960 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table960) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1066 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1066) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table961 2024-11-11T04:23:30,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table961) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1067 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1067) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table720 2024-11-11T04:23:30,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table720) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table962 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table962) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1068 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1068) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table721 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table721) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table963 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table963) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table722 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table722) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table964 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table964) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table723 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table723) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table965 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table965) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table724 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table724) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table966 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table966) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table725 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table725) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table967 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table967) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table726 2024-11-11T04:23:30,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table726) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table968 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table968) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table727 2024-11-11T04:23:30,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table727) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table969 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table969) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table728 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table728) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1060 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1060) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table729 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table729) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1069 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1069) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1050 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1050) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1292 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1051 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1051) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1293 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1052 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1052) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1294 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1053 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1053) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1295 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1054 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1054) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1296 2024-11-11T04:23:30,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1055 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1055) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1297 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table950 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table950) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1056 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1056) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1298 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table951 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table951) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1057 2024-11-11T04:23:30,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1057) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1299 2024-11-11T04:23:30,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table710 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table710) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table952 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table952) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table711 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table711) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table953 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table953) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table712 2024-11-11T04:23:30,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table712) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table954 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table954) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table713 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table713) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table955 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table955) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table714 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table714) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table956 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table956) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table715 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table715) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table957 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table957) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table716 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table716) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table958 2024-11-11T04:23:30,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table958) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1290 2024-11-11T04:23:30,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table717 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table717) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table959 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table959) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1291 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table718 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table718) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table719 2024-11-11T04:23:30,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table719) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1058 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1058) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1059 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1059) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1281 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1040 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1040) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1282 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1041 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1041) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1283 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table980 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table980) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1042 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1042) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1284 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table981 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table981) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1043 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1043) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1285 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table740 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table740) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table982 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table982) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1044 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1044) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1286 2024-11-11T04:23:30,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table741 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table741) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table983 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table983) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1045 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1045) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1287 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table500 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table500) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table742 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table742) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table984 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table984) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1046 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1046) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1288 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table501 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table501) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table743 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table743) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table985 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table985) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table502 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table502) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table744 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table744) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table986 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table986) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table503 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table503) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table745 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table745) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table987 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table987) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table504 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table504) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table746 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table746) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table988 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table988) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table505 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table505) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table747 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table747) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table989 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table989) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table506 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table506) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table748 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table748) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table507 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table507) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table749 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table749) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table508 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table508) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1280 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table509 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table509) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1047 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1047) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
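The BalancerClusterState lines repeated for every table describe the same topology bookkeeping: each of the six servers maps to its own host index, and all hosts sit in a single rack (rack 0). The sketch below reproduces that server-to-host/rack indexing for the server names seen in this log; it is an illustration of the bookkeeping only, not the actual BalancerClusterState implementation.

// Minimal sketch of the server -> host / rack index mapping logged above
// (one host per server, a single rack). Illustrative only.
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public final class ClusterStateSketch {
    public static void main(String[] args) {
        // Server names in host-index order, as printed in the "Hosts are {...}" line.
        List<String> servers = List.of("srv1122117808", "srv1182882320", "srv1229847721",
            "srv1230816291", "srv1543314833", "srv2146099471");

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        int[] serverToHost = new int[servers.size()];
        int[] serverToRack = new int[servers.size()];

        for (int i = 0; i < servers.size(); i++) {
            String name = servers.get(i);
            if (!hostIndex.containsKey(name)) {
                hostIndex.put(name, hostIndex.size());
            }
            // Matches "server N is on host N" and "server N is on rack 0" above.
            serverToHost[i] = hostIndex.get(name);
            serverToRack[i] = 0;
            System.out.println("server " + i + " is on host " + serverToHost[i]
                + " and rack " + serverToRack[i]);
        }
        System.out.println("number of hosts=" + hostIndex.size() + ", number of racks=1");
    }
}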
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1289 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1048 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1048) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1049 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1049) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1270 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1271 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1030 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1030) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1272 2024-11-11T04:23:30,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1031 2024-11-11T04:23:30,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1031) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1273 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table970 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table970) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1032 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1032) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1274 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table971 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table971) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1033 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1033) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1275 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table730 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table730) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table972 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table972) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1034 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1034) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1276 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table731 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table731) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table973 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table973) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1035 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1035) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1277 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table732 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table732) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table974 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table974) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table733 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table733) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table975 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table975) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table734 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table734) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table976 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table976) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table735 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table735) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table977 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table977) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table736 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table736) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table978 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table978) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table737 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table737) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table979 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table979) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table738 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table738) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table739 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table739) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
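The same cluster topology is also re-derived for every table: six region servers, each counted as its own host, all mapped to the single rack "rack", which is why the DEBUG lines always report number of hosts=6 and number of racks=1 (and why rack locality cannot differentiate placements here). A minimal sketch of that indexing, under assumed names rather than the real BalancerClusterState:

// Illustrative sketch: index servers, hosts and racks as the DEBUG lines above report them.
import java.util.LinkedHashMap;
import java.util.Map;

final class ClusterStateSketch {
  public static void main(String[] args) {
    String[] servers = {"srv1122117808", "srv1182882320", "srv1229847721",
                        "srv1230816291", "srv1543314833", "srv2146099471"};
    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    Map<String, Integer> rackIndex = new LinkedHashMap<>();
    for (String server : servers) {
      if (!hostIndex.containsKey(server)) {
        hostIndex.put(server, hostIndex.size()); // each server is its own host in this test
      }
      if (!rackIndex.containsKey("rack")) {
        rackIndex.put("rack", rackIndex.size()); // every host sits on the single rack "rack"
      }
    }
    // Prints hosts=6, racks=1, matching "number of hosts=6, number of racks=1".
    System.out.println("hosts=" + hostIndex.size() + ", racks=" + rackIndex.size());
  }
}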
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1036 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1036) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1278 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1037 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1037) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1279 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1038 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1038) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1039 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1039) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table361 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table362 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table363 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-11T04:23:30,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table364 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-11T04:23:30,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table365 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-11T04:23:30,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table366 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-11T04:23:30,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table367 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table368 2024-11-11T04:23:30,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1420 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table369 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1429 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1421 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1422 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,578 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,578 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1423 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1424 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1425 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1426 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table370 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1427 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table371 2024-11-11T04:23:30,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1428 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table350 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,582 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table592 2024-11-11T04:23:30,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table592) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table351 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table593 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table593) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table352 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table594 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table594) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table353 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table595 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table595) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table354 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table596 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table596) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,586 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table355 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table597 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table597) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table356 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table598 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table598) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table357 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table599 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table599) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,589 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,589 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table358 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table359 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,591 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1418 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1419 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1410 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1411 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,592 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1412 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1413 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1414 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1415 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1416 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table360 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1417 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table383 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table384 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,596 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table385 2024-11-11T04:23:30,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table386 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,597 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-11T04:23:30,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,598 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,598 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table387 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,598 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,598 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table388 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table389 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1407 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,601 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1408 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1409 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,602 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:30,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,603 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1400 2024-11-11T04:23:30,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1401 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1402 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table390 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1403 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table391 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1404 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table392 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1405 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table393 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1406 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table372 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table373 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table374 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table375 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-11T04:23:30,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table376 2024-11-11T04:23:30,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table377 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-11T04:23:30,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table378 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table379 2024-11-11T04:23:30,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table380 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table381 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table382 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-11T04:23:30,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table560 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table560) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table561 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table561) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table320 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table562 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table562) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1460 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table321 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table563 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table563) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1461 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table322 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table564 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table564) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1220 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1462 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table323 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table565 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table565) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1221 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1463 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table324 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table566 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table566) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1222 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1464 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table325 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table567 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table567) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table326 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table568 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table568) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table327 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table569 2024-11-11T04:23:30,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table569) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table328 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table329 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1223 2024-11-11T04:23:30,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1465 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1224 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1466 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1225 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1467 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1226 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1468 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1227 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1469 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1228 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1229 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table790 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table790) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table791 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table791) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table550 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table550) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table792 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table792) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table551 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table551) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table793 2024-11-11T04:23:30,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table793) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table310 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table552 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table552) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table794 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table794) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1450 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table311 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table553 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table553) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table795 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table795) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1451 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table312 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table554 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table554) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table796 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table796) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1210 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1452 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table313 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table555 2024-11-11T04:23:30,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table555) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table797 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table797) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1211 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1453 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table314 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table556 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table556) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table798 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table798) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table315 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table557 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table557) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table799 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table799) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table316 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table558 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table558) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table317 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table559 2024-11-11T04:23:30,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table559) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table318 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table319 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1212 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1454 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1213 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1455 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1214 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1456 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1215 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1457 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-11T04:23:30,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1216 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1458 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1217 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1459 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1218 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1219 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table581 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table581) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table340 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table582 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table582) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table341 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table583 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table583) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-11T04:23:30,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table342 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table584 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table584) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table343 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table585 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table585) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table344 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table586 2024-11-11T04:23:30,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table586) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1440 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table345 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table587 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table587) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1441 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table346 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table588 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table588) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1200 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1442 2024-11-11T04:23:30,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table347 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table589 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table589) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table348 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table349 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1209 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1201 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1443 2024-11-11T04:23:30,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1202 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1444 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1203 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1445 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1204 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1446 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-11T04:23:30,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1205 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1447 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1206 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1448 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table590 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table590) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1207 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1449 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-11T04:23:30,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table591 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table591) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1208 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-11T04:23:30,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table570 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table570) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table571 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table571) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table330 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table572 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table572) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table331 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table573 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table573) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table332 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table574 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table574) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table333 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table575 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table575) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table334 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table576 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table576) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1430 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table335 2024-11-11T04:23:30,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table577 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table577) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1431 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table336 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table578 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table578) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table337 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table579 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table579) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table338 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table339 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1432 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1433 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1434 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1435 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1436 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1437 2024-11-11T04:23:30,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1438 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table580 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table580) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1439 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table284 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table285 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table286 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table287 2024-11-11T04:23:30,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table288 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table289 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table290 2024-11-11T04:23:30,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table291 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table292 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table293 2024-11-11T04:23:30,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table294 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table273 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table274 2024-11-11T04:23:30,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table275 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table276 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table277 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table278 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table279 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table280 2024-11-11T04:23:30,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table281 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table282 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table283 2024-11-11T04:23:30,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table295 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table296 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table297 2024-11-11T04:23:30,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table298 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table299 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table920 2024-11-11T04:23:30,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table920) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table921 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table921) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table922 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table922) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table923 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table923) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table924 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table924) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table925 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table925) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table926 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table926) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table927 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table927) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table928 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table928) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table929 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table929) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1094 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1094) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1095 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1095) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1096 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1096) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1097 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1097) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1098 2024-11-11T04:23:30,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1098) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1099 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1099) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table910 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table910) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table911 2024-11-11T04:23:30,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table911) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table912 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table912) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1090 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1090) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table913 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table913) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1091 2024-11-11T04:23:30,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1091) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table914 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table914) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1092 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1092) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table915 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table915) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1093 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1093) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table916 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table916) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table917 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table917) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table918 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table918) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table919 2024-11-11T04:23:30,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table919) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1083 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1083) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1084 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1084) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1085 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1085) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1086 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1086) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1087 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1087) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1088 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1088) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1089 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1089) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table940 2024-11-11T04:23:30,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table940) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table941 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table941) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table700 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table700) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table942 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table942) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table701 2024-11-11T04:23:30,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table701) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table943 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table943) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table702 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table702) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table944 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table944) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table703 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table703) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table945 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table945) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table704 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table704) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table946 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table946) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1080 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1080) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table705 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table705) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table947 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table947) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1081 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1081) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table706 2024-11-11T04:23:30,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table706) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table948 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table948) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1082 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1082) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table707 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table707) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table949 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table949) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table708 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table708) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table709 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table709) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1072 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1072) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1073 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1073) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1074 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1074) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1075 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1075) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1076 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1076) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1077 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1077) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1078 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1078) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1079 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1079) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table930 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table930) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table931 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table931) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table932 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table932) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table933 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table933) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table934 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table934) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table935 2024-11-11T04:23:30,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table935) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table936 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table936) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1070 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1070) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table937 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table937) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1071 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1071) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table938 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table938) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table939 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table939) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table900 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table900) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table901 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table901) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table902 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table902) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table903 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table903) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table904 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table904) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table905 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table905) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table906 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table906) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table907 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table907) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table908 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table908) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table909 2024-11-11T04:23:30,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2146099471=5, srv1230816291=3, srv1543314833=4, srv1122117808=0, srv1229847721=2, srv1182882320=1} racks are {rack=0} 2024-11-11T04:23:30,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-11T04:23:30,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table909) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
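[editor's note] Every "skipping load balancing" INFO entry above points at the same tunable that the message itself names, hbase.master.balancer.stochastic.minCostNeedBalance (default 1.0 per "threshold(1.0)"). A minimal sketch of lowering that threshold through the Hadoop Configuration API follows; the value 0.05f and the regionCountCost key are illustrative assumptions, not something these logs confirm, and in practice the same properties would normally be set in hbase-site.xml.

    import org.apache.hadoop.conf.Configuration;

    // Sketch only: lowers the balancer threshold named in the log messages above.
    // Only hbase.master.balancer.stochastic.minCostNeedBalance appears in these logs;
    // the chosen value and the regionCountCost key are assumptions for illustration.
    public class BalancerTuningSketch {
      public static Configuration tuned() {
        Configuration conf = new Configuration();
        // Lower threshold => the StochasticLoadBalancer acts on smaller
        // weighted-average imbalances instead of skipping the table.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Assumed multiplier key: raising it would give RegionCountSkewCostFunction
        // (multiplier=500.0 in the functionCost dumps) even more relative weight.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        return conf;
      }
    }

[end editor's note]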
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-11T04:23:30,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1746906356=5, srv1688436859=3, srv1805743145=6, srv481882030=11, srv888495634=14, srv2040468773=9, srv1720040507=4, srv2029639005=8, srv1485591205=2, srv881073263=13, srv2122730016=10, srv1107801672=1, srv1842556938=7, srv548334457=12, srv1059499677=0} racks are {rack=0} 2024-11-11T04:23:30,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:30,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:30,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:30,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:30,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:30,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,728 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:30,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:30,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:30,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:30,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:30,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1746906356=5, srv1688436859=3, srv1805743145=6, srv481882030=11, srv888495634=14, srv2040468773=9, srv1720040507=4, srv2029639005=8, srv1485591205=2, srv881073263=13, srv2122730016=10, srv1107801672=1, srv1842556938=7, srv548334457=12, srv1059499677=0} racks are {rack=0} 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,729 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1746906356=5, srv1688436859=3, srv1805743145=6, srv481882030=11, srv888495634=14, srv2040468773=9, srv1720040507=4, srv2029639005=8, srv1485591205=2, srv881073263=13, srv2122730016=10, srv1107801672=1, srv1842556938=7, srv548334457=12, srv1059499677=0} racks are {rack=0} 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:30,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:30,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:30,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:30,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:30,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,730 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:30,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:30,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:30,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:30,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:30,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-11T04:23:30,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:30,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1746906356=5, srv1688436859=3, srv1805743145=6, srv481882030=11, srv888495634=14, srv2040468773=9, srv1720040507=4, srv2029639005=8, srv1485591205=2, srv881073263=13, srv2122730016=10, srv1107801672=1, srv1842556938=7, srv548334457=12, srv1059499677=0} racks are {rack=0} 2024-11-11T04:23:30,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,731 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:30,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:30,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:30,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:30,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:30,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:30,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:30,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:30,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:30,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:30,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-11T04:23:30,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:30,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1746906356=5, srv1688436859=3, srv1805743145=6, srv481882030=11, srv888495634=14, srv2040468773=9, srv1720040507=4, srv2029639005=8, srv1485591205=2, srv881073263=13, srv2122730016=10, srv1107801672=1, srv1842556938=7, srv548334457=12, srv1059499677=0} racks are {rack=0} 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:30,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,732 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:30,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:30,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:30,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:30,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1746906356=5, srv1688436859=3, srv1805743145=6, srv481882030=11, srv888495634=14, srv2040468773=9, srv1720040507=4, srv2029639005=8, srv1485591205=2, srv881073263=13, srv2122730016=10, srv1107801672=1, srv1842556938=7, srv548334457=12, srv1059499677=0} racks are {rack=0} 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,733 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1746906356=5, srv1688436859=3, srv1805743145=6, srv481882030=11, srv888495634=14, srv2040468773=9, srv1720040507=4, srv2029639005=8, srv1485591205=2, srv881073263=13, srv2122730016=10, srv1107801672=1, srv1842556938=7, srv548334457=12, srv1059499677=0} racks are {rack=0} 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:30,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:30,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-11T04:23:30,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:30,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1746906356=5, srv1688436859=3, srv1805743145=6, srv481882030=11, srv888495634=14, srv2040468773=9, srv1720040507=4, srv2029639005=8, srv1485591205=2, srv881073263=13, srv2122730016=10, srv1107801672=1, srv1842556938=7, srv548334457=12, srv1059499677=0} racks are {rack=0} 2024-11-11T04:23:30,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,734 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:30,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:30,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:30,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:30,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:30,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1746906356=5, srv1688436859=3, srv1805743145=6, srv481882030=11, srv888495634=14, srv2040468773=9, srv1720040507=4, srv2029639005=8, srv1485591205=2, srv881073263=13, srv2122730016=10, srv1107801672=1, srv1842556938=7, srv548334457=12, srv1059499677=0} racks are {rack=0} 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,735 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1746906356=5, srv1688436859=3, srv1805743145=6, srv481882030=11, srv888495634=14, srv2040468773=9, srv1720040507=4, srv2029639005=8, srv1485591205=2, srv881073263=13, srv2122730016=10, srv1107801672=1, srv1842556938=7, srv548334457=12, srv1059499677=0} racks are {rack=0} 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,735 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1746906356=5, srv1688436859=3, srv1805743145=6, srv481882030=11, srv888495634=14, srv2040468773=9, srv1720040507=4, srv2029639005=8, srv1485591205=2, srv881073263=13, srv2122730016=10, srv1107801672=1, srv1842556938=7, srv548334457=12, srv1059499677=0} racks are {rack=0} 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:30,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1746906356=5, srv1688436859=3, srv1805743145=6, srv481882030=11, srv888495634=14, srv2040468773=9, srv1720040507=4, srv2029639005=8, srv1485591205=2, srv881073263=13, srv2122730016=10, srv1107801672=1, srv1842556938=7, srv548334457=12, srv1059499677=0} racks are {rack=0} 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,737 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1746906356=5, srv1688436859=3, srv1805743145=6, srv481882030=11, srv888495634=14, srv2040468773=9, srv1720040507=4, srv2029639005=8, srv1485591205=2, srv881073263=13, srv2122730016=10, srv1107801672=1, srv1842556938=7, srv548334457=12, srv1059499677=0} racks are {rack=0} 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:30,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1746906356=5, srv1688436859=3, srv1805743145=6, srv481882030=11, srv888495634=14, srv2040468773=9, srv1720040507=4, srv2029639005=8, srv1485591205=2, srv881073263=13, srv2122730016=10, srv1107801672=1, srv1842556938=7, srv548334457=12, srv1059499677=0} racks are {rack=0} 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,738 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:30,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:30,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1746906356=5, srv1688436859=3, srv1805743145=6, srv481882030=11, srv888495634=14, srv2040468773=9, srv1720040507=4, srv2029639005=8, srv1485591205=2, srv881073263=13, srv2122730016=10, srv1107801672=1, srv1842556938=7, srv548334457=12, srv1059499677=0} racks are {rack=0} 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,739 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:30,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1794870265=4, srv1546735941=0, srv1884810156=5, srv878980852=9, srv413113873=6, srv1686230221=2, srv760613710=8, srv549600164=7, srv1726113672=3, srv1554288129=1} racks are {rack=0} 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 
2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1794870265=4, srv1546735941=0, srv1884810156=5, srv878980852=9, srv413113873=6, srv1686230221=2, srv760613710=8, srv549600164=7, srv1726113672=3, srv1554288129=1} racks are {rack=0} 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1794870265=4, srv1546735941=0, srv1884810156=5, srv878980852=9, srv413113873=6, srv1686230221=2, srv760613710=8, srv549600164=7, srv1726113672=3, srv1554288129=1} racks are {rack=0} 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,740 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1794870265=4, srv1546735941=0, srv1884810156=5, srv878980852=9, srv413113873=6, srv1686230221=2, srv760613710=8, srv549600164=7, srv1726113672=3, srv1554288129=1} racks are {rack=0} 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1794870265=4, srv1546735941=0, srv1884810156=5, srv878980852=9, srv413113873=6, srv1686230221=2, srv760613710=8, srv549600164=7, srv1726113672=3, srv1554288129=1} racks are {rack=0} 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1794870265=4, srv1546735941=0, srv1884810156=5, srv878980852=9, srv413113873=6, srv1686230221=2, srv760613710=8, srv549600164=7, srv1726113672=3, srv1554288129=1} racks are {rack=0} 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1794870265=4, srv1546735941=0, srv1884810156=5, srv878980852=9, srv413113873=6, srv1686230221=2, srv760613710=8, srv549600164=7, srv1726113672=3, srv1554288129=1} racks are {rack=0} 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
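The repeated skip decisions in this run can be read straight off the functionCost listings: every per-function imbalance is 0.0, so the multiplier-weighted average imbalance is 0.0 and never exceeds the logged threshold(1.0). A minimal, hypothetical Java sketch of that arithmetic follows, using the multipliers and imbalances exactly as logged; the averaging formula itself is an assumption for illustration and is not quoted from StochasticLoadBalancer.

    // Hypothetical sketch: reproduce the "weighted average imbalance=0.0 <= threshold(1.0)"
    // comparison from the log. Multipliers/imbalances are the active cost functions as logged;
    // the weighting formula is an assumption, not the balancer's verbatim code.
    public class WeightedImbalanceSketch {
      public static void main(String[] args) {
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};
        double weightedSum = 0.0, totalWeight = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
          weightedSum += multipliers[i] * imbalances[i];
          totalWeight += multipliers[i];
        }
        double weightedAverageImbalance = weightedSum / totalWeight; // 0.0 in this run
        double minCostNeedBalance = 1.0;                             // threshold(1.0) from the log
        boolean needsBalancing = weightedAverageImbalance > minCostNeedBalance;
        System.out.println("weighted average imbalance = " + weightedAverageImbalance);
        System.out.println("needs balancing = " + needsBalancing);   // false, so no plan is generated
      }
    }

With every imbalance at 0.0 the comparison fails for each table, which is why the same "skipping load balancing" message repeats for every table in this section.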
2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1794870265=4, srv1546735941=0, srv1884810156=5, srv878980852=9, srv413113873=6, srv1686230221=2, srv760613710=8, srv549600164=7, srv1726113672=3, srv1554288129=1} racks are {rack=0} 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,742 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1794870265=4, srv1546735941=0, srv1884810156=5, srv878980852=9, srv413113873=6, srv1686230221=2, srv760613710=8, srv549600164=7, srv1726113672=3, srv1554288129=1} racks are {rack=0} 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1794870265=4, srv1546735941=0, srv1884810156=5, srv878980852=9, srv413113873=6, srv1686230221=2, srv760613710=8, srv549600164=7, srv1726113672=3, srv1554288129=1} racks are {rack=0} 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv471897143=7, srv588575362=9, srv1049269701=0, srv1172361959=1, srv256837280=6, srv1510147855=3, srv245251357=5, srv482638367=8, srv2129043299=4, srv1250139654=2} racks are {rack=0} 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv471897143=7, srv588575362=9, srv1049269701=0, srv1172361959=1, srv256837280=6, srv1510147855=3, srv245251357=5, srv482638367=8, srv2129043299=4, srv1250139654=2} racks are {rack=0} 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
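The BalancerClusterState lines that precede each decision index every server name to a host id and place all of them in a single rack, which is where the "Number of tables=1, number of hosts=10, number of racks=1" summary comes from. A small, hypothetical sketch of that counting follows, with the host map copied from the table4 plan above; the class and variable names are illustrative, not HBase's.

    // Hypothetical sketch: derive the logged "number of hosts=10, number of racks=1" counts
    // from the host and rack index maps; map contents are copied from the table4 plan above.
    import java.util.Map;

    public class ClusterStateCountsSketch {
      public static void main(String[] args) {
        Map<String, Integer> hostIndex = Map.of(
            "srv471897143", 7, "srv588575362", 9, "srv1049269701", 0, "srv1172361959", 1,
            "srv256837280", 6, "srv1510147855", 3, "srv245251357", 5, "srv482638367", 8,
            "srv2129043299", 4, "srv1250139654", 2);
        Map<String, Integer> rackIndex = Map.of("rack", 0);
        // Each server gets its own host id, and every server shares rack id 0, matching the
        // "server N is on host N" and "server N is on rack 0" lines above.
        System.out.println("number of hosts=" + hostIndex.size()); // 10
        System.out.println("number of racks=" + rackIndex.size()); // 1
      }
    }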
2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv471897143=7, srv588575362=9, srv1049269701=0, srv1172361959=1, srv256837280=6, srv1510147855=3, srv245251357=5, srv482638367=8, srv2129043299=4, srv1250139654=2} racks are {rack=0} 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,745 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv471897143=7, srv588575362=9, srv1049269701=0, srv1172361959=1, srv256837280=6, srv1510147855=3, srv245251357=5, srv482638367=8, srv2129043299=4, srv1250139654=2} racks are {rack=0} 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:30,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv471897143=7, srv588575362=9, srv1049269701=0, srv1172361959=1, srv256837280=6, srv1510147855=3, srv245251357=5, srv482638367=8, srv2129043299=4, srv1250139654=2} racks are {rack=0} 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv471897143=7, srv588575362=9, srv1049269701=0, srv1172361959=1, srv256837280=6, srv1510147855=3, srv245251357=5, srv482638367=8, srv2129043299=4, srv1250139654=2} racks are {rack=0} 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-11T04:23:30,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
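Each skip message also names the two tuning knobs for more aggressive balancing: lower hbase.master.balancer.stochastic.minCostNeedBalance, or raise the multiplier of a specific cost function. A hedged sketch of what that configuration change might look like follows; the minCostNeedBalance key is taken verbatim from the log, while hbase.master.balancer.stochastic.regionCountCost is assumed to be the multiplier key behind RegionCountSkewCostFunction, and both values are illustrative only.

    // Sketch of the tuning suggested by the log message above. Only minCostNeedBalance appears
    // verbatim in the log; the regionCountCost key and both values are assumptions for illustration.
    import org.apache.hadoop.conf.Configuration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Option 1: lower the skip threshold so smaller imbalances still produce a balance plan.
        conf.set("hbase.master.balancer.stochastic.minCostNeedBalance", "0.05");
        // Option 2 (assumed key): weight region-count skew more heavily than the logged 500.0.
        conf.set("hbase.master.balancer.stochastic.regionCountCost", "1000");
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
      }
    }

In a run like this one, such properties would be set on the Configuration handed to the balancer before it is initialized, but that wiring is outside what the log shows.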
2024-11-11T04:23:30,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-11T04:23:30,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,748 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,750 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,751 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,753 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
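[Editor's note] The repeated StochasticLoadBalancer(421) entries above all record the same skip decision: a table's plan is dropped because the weighted average imbalance (0.0 here, since every cost function in functionCost= reports imbalance=0.0) does not exceed the threshold of 1.0. The sketch below is a minimal, hypothetical Java illustration of that comparison; it assumes the weighted average is sum(multiplier_i * imbalance_i) / sum(multiplier_i) over the functions printed in the log, which may differ from the exact aggregation HBase uses internally.

// Minimal sketch of the skip decision logged by StochasticLoadBalancer(421).
// Assumption: weighted average imbalance = sum(multiplier * imbalance) / sum(multiplier)
// over the cost functions printed in functionCost=; the real HBase code may aggregate differently.
public class BalanceSkipSketch {
    public static void main(String[] args) {
        // multiplier / imbalance pairs as printed for table41 above
        double[][] functions = {
            {500.0, 0.0}, // RegionCountSkewCostFunction
            {7.0,   0.0}, // MoveCostFunction
            {15.0,  0.0}, // RackLocalityCostFunction
            {35.0,  0.0}, // TableSkewCostFunction
            {5.0,   0.0}, // ReadRequestCostFunction
            {5.0,   0.0}, // WriteRequestCostFunction
            {5.0,   0.0}, // MemStoreSizeCostFunction
            {5.0,   0.0}  // StoreFileCostFunction
        };
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] f : functions) {
            weightedSum += f[0] * f[1];
            multiplierSum += f[0];
        }
        double weightedAverageImbalance = weightedSum / multiplierSum; // 0.0 for the values above
        double minCostNeedBalance = 1.0; // threshold quoted in the log message
        if (weightedAverageImbalance <= minCostNeedBalance) {
            System.out.println("skipping load balancing because weighted average imbalance="
                + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
        }
    }
}

[End editor's note; log continues.]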
2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,755 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,755 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
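[Editor's note] Each skip message also names the two tuning knobs for more aggressive balancing: lowering hbase.master.balancer.stochastic.minCostNeedBalance from the 1.0 used in this run, or raising the multiplier of a specific cost function. Below is a hedged sketch of setting either knob on a Hadoop Configuration; only the minCostNeedBalance property name appears in this log, and the regionCountCost property name is an assumption based on commonly documented HBase settings, not something shown here.

import org.apache.hadoop.conf.Configuration;

// Sketch only: make the balancer more likely to act by lowering the skip threshold
// named in the log, or by raising a cost-function multiplier. The multiplier property
// name below is an assumption; verify it against your HBase version's documentation.
public class BalancerTuningSketch {
    public static Configuration tunedConf() {
        Configuration conf = new Configuration();
        // Property quoted verbatim in the skip message above (1.0 in this run).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Assumed property for RegionCountSkewCostFunction's multiplier (printed as 500.0 above).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);
        return conf;
    }
}

[End editor's note; log continues.]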
2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-11T04:23:30,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,757 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:30,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,758 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
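The "Table specific ... skipping load balancing" entries above all follow the same pattern: the weighted average imbalance is 0.0, which does not exceed the minCostNeedBalance threshold of 1.0, so no balance plan is produced for the table. As a reading aid, here is a minimal Java sketch of that comparison, assuming the weighted average is simply the multiplier-weighted mean of the per-function imbalances printed in functionCost; the class and method names are illustrative and are not the actual StochasticLoadBalancer internals.

// Sketch only: assumes the "weighted average imbalance" in StochasticLoadBalancer(421)
// is the multiplier-weighted mean of the per-function imbalances from functionCost.
public final class WeightedImbalanceSketch {
    static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
        double weightedSum = 0.0;
        double weightTotal = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            weightTotal += multipliers[i];
        }
        return weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances as printed above; functions marked "not needed" are omitted.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
        double avg = weightedAverageImbalance(multipliers, imbalances);
        boolean needsBalance = avg > 1.0; // 1.0 = hbase.master.balancer.stochastic.minCostNeedBalance
        // With every imbalance at 0.0 the average is 0.0, so balancing is skipped, matching the log.
        System.out.println("weighted average imbalance=" + avg + ", needs balance=" + needsBalance);
    }
}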
2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,760 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,762 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-11T04:23:30,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
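Each skip entry also names the two tuning options: lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0, or raise the multiplier of the cost function that matters to you. Below is a small sketch of the first option using the standard Hadoop Configuration API; in a live cluster the property would normally be set in the master's hbase-site.xml instead, and the 0.05 value is only an example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: lower minCostNeedBalance so the stochastic balancer reacts to smaller imbalances.
public final class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        System.out.println("minCostNeedBalance="
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}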
2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-11T04:23:30,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,764 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
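The BalancerClusterState lines around here (202/303/314) just record how each server name is folded into dense host and rack indices before any cost is computed: every server is its own host in this test, and all of them sit on the single default rack, hence "rack 0" everywhere. A minimal illustrative sketch of that bookkeeping, using a few hypothetical server names and no HBase internals, assuming only that indexing works by first appearance:

```java
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustrative only: mirrors the "Hosts are {...} racks are {rack=0}" and
// "server N is on host N / rack 0" bookkeeping seen in the log, not HBase's
// actual BalancerClusterState implementation.
public class ClusterIndexSketch {
    public static void main(String[] args) {
        List<String> servers = List.of("srv1285271288", "srv1649184584", "srv1713966786");

        // Assign each distinct host and rack a dense integer index in order of first appearance.
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();
        int[] serverToHost = new int[servers.size()];
        int[] serverToRack = new int[servers.size()];

        for (int i = 0; i < servers.size(); i++) {
            String host = servers.get(i); // in this test every server is its own host
            String rack = "rack";         // single default rack, hence rack index 0 for everyone
            serverToHost[i] = hostIndex.computeIfAbsent(host, h -> hostIndex.size());
            serverToRack[i] = rackIndex.computeIfAbsent(rack, r -> rackIndex.size());
            System.out.printf("server %d is on host %d, rack %d%n", i, serverToHost[i], serverToRack[i]);
        }
        System.out.printf("Number of hosts=%d, number of racks=%d%n", hostIndex.size(), rackIndex.size());
    }
}
```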
2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
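Each "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)" message is a simple gate: the per-cost-function imbalances listed in functionCost= are combined, weighted by their multipliers, and compared against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run). The following is a rough, self-contained sketch of that check under that interpretation, not HBase's actual StochasticLoadBalancer code; the multipliers are copied from the summaries above, the class name is made up:

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Rough sketch of the "needs balance" gate suggested by the log messages:
// weighted average imbalance = sum(multiplier_i * imbalance_i) / sum(multiplier_i),
// and balancing is skipped when it does not exceed minCostNeedBalance.
public class NeedsBalanceSketch {
    public static void main(String[] args) {
        // Multipliers taken from the functionCost=... summaries in this log;
        // imbalances are all 0.0 here because the generated layout is already even.
        Map<String, double[]> cost = new LinkedHashMap<>(); // name -> {multiplier, imbalance}
        cost.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        cost.put("MoveCostFunction",            new double[] {7.0,   0.0});
        cost.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        cost.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        cost.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});

        double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance in this test

        double weighted = 0.0, totalMultiplier = 0.0;
        for (double[] mi : cost.values()) {
            weighted += mi[0] * mi[1];
            totalMultiplier += mi[0];
        }
        double weightedAverageImbalance = totalMultiplier == 0 ? 0 : weighted / totalMultiplier;

        if (weightedAverageImbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing because weighted average imbalance=%.1f <= threshold(%.1f)%n",
                weightedAverageImbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}
```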
2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,766 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1285271288=0, srv2115014411=4, srv2025405749=3, srv551593734=8, srv845614396=9, srv1713966786=2, srv304674156=6, srv1649184584=1, srv238252773=5, srv332868129=7} racks are {rack=0} 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-11T04:23:30,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-11T04:23:30,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
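The remedy the log keeps suggesting, lower hbase.master.balancer.stochastic.minCostNeedBalance or raise a cost-function multiplier, is an ordinary configuration change. A hedged sketch using Hadoop's Configuration API follows; minCostNeedBalance appears verbatim in the log, while the multiplier key shown (hbase.master.balancer.stochastic.regionCountCost) is the commonly documented name for the RegionCountSkewCostFunction weight and should be checked against your HBase release, and the numeric values are examples only:

```java
import org.apache.hadoop.conf.Configuration;

// Sketch of the tuning the log message recommends. Values are examples,
// not recommendations; defaults differ between HBase releases.
public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Lower the threshold so smaller imbalances still trigger a balance plan
        // (this test run had it at 1.0, which effectively suppresses balancing).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Or give a specific cost function more weight; property name assumed,
        // verify it against the HBase book / StochasticLoadBalancer source for your version.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f));
    }
}
```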
2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,768 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-11T04:23:30,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
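The repeated "Start Generate Balance plan for table: tableNN" entries show the balancer being driven once per table over the same ten-server cluster, with an empty plan each time. A stripped-down illustration of that outer loop is below; the TableBalancer interface, server names, and region names are hypothetical stand-ins, not HBase APIs:

```java
import java.util.List;
import java.util.Map;

// Conceptual sketch only: the master iterates tables and asks the balancer for a
// per-table plan; an empty plan corresponds to the "skipping load balancing" lines.
public class PerTableBalanceSketch {
    // Hypothetical stand-in for a balancer: returns region moves for one table.
    interface TableBalancer {
        List<String> balanceTable(String table, Map<String, List<String>> regionsByServer);
    }

    public static void main(String[] args) {
        TableBalancer balancer = (table, regionsByServer) -> List.of(); // already balanced -> no moves

        Map<String, List<String>> assignments = Map.of(
            "srv1359710066", List.of("region-a"),
            "srv1773774659", List.of("region-b"));

        for (String table : List.of("table13", "table14", "table15")) {
            System.out.println("Start Generate Balance plan for table: " + table);
            List<String> moves = balancer.balanceTable(table, assignments);
            if (moves.isEmpty()) {
                System.out.println("Table specific (" + table + ") - skipping load balancing");
            }
        }
    }
}
```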
2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,770 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,772 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
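[editor's note] Since the log carries no accompanying code, here is a minimal standalone sketch of the arithmetic behind the repeated "weighted average imbalance=0.0 <= threshold(1.0)" decision above. It assumes the weighted average is the multiplier-weighted mean of the per-function imbalance values printed after functionCost=, restricted to the functions not marked "(not needed)"; the class name WeightedImbalanceSketch is hypothetical and not part of HBase.

public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    // Multipliers and imbalances as printed in the functionCost= line above
    // (RegionCountSkew, Move, RackLocality, TableSkew, ReadRequest, WriteRequest,
    // MemStoreSize, StoreFile).
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};
    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weightedSum += multipliers[i] * imbalances[i];
      multiplierSum += multipliers[i];
    }
    // With every imbalance at 0.0 the weighted average is 0.0 regardless of the multipliers,
    // which matches the log: 0.0 <= threshold(1.0), so the per-table plan is skipped.
    double weightedAverageImbalance = weightedSum / multiplierSum;
    double minCostNeedBalance = 1.0; // the "threshold(1.0)" reported in the log
    System.out.println(weightedAverageImbalance <= minCostNeedBalance
        ? "skipping load balancing" : "generating balance plan");
  }
}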
2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,773 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-11T04:23:30,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
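[editor's note] The skip message above suggests either lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising the multiplier of a specific cost function. The sketch below shows where those knobs would be set through the standard Hadoop Configuration API; in a real deployment they normally go into hbase-site.xml on the master. Only minCostNeedBalance appears verbatim in the log; the two multiplier property names are assumptions inferred from the defaults shown (500.0 for RegionCountSkewCostFunction, 7.0 for MoveCostFunction) and should be verified against the HBase release in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Lower the threshold the log reports as "threshold(1.0)" so smaller imbalances
    // still trigger a balance plan.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Assumed property names for two of the multipliers printed in functionCost=;
    // confirm them for the StochasticLoadBalancer version being tested.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);
    conf.setFloat("hbase.master.balancer.stochastic.moveCost", 7.0f);
    System.out.println("minCostNeedBalance="
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}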
2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,775 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-11T04:23:30,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,777 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,779 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,780 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,782 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
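The StochasticLoadBalancer(421) entries repeated throughout this run all report the same decision: the weighted average imbalance across the active cost functions is 0.0, which does not exceed the 1.0 threshold, so no balance plan is generated for the table. The sketch below recomputes that figure from the multiplier/imbalance pairs printed in the functionCost listings; the arithmetic (a multiplier-weighted average compared against the threshold) is an interpretation of the log message, not the balancer's actual source code.

```java
// Hedged illustration only: recompute the "weighted average imbalance" that the
// log entries report, from the multiplier/imbalance pairs in a functionCost line.
public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    // Values copied from the functionCost listings above; cost functions marked
    // "(not needed)" are omitted because they contribute nothing.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
    double minCostNeedBalance = 1.0; // the "threshold(1.0)" in the log

    double weightedSum = 0.0;
    double weightTotal = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weightedSum += multipliers[i] * imbalances[i];
      weightTotal += multipliers[i];
    }
    double weightedAverageImbalance =
        weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;

    // 0.0 <= 1.0, so the balancer logs "skipping load balancing" and produces
    // no plan for this table.
    if (weightedAverageImbalance <= minCostNeedBalance) {
      System.out.println("skip balancing, imbalance=" + weightedAverageImbalance);
    } else {
      System.out.println("generate balance plan");
    }
  }
}
```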
2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,784 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-11T04:23:30,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,786 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1359710066=2, srv1773774659=5, srv1086436819=0, srv1532471720=3, srv377860379=8, srv861317709=9, srv220102605=7, srv1632224927=4, srv1166842298=1, srv1990529729=6} racks are {rack=0} 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-11T04:23:30,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,788 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,790 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,792 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
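The "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)" message above names its own two tuning knobs: lower hbase.master.balancer.stochastic.minCostNeedBalance or raise a cost function multiplier. A minimal sketch of applying that advice programmatically follows; only the minCostNeedBalance key is taken verbatim from the log, while the regionCountCost key is assumed to be the conventional name for the RegionCountSkewCostFunction weight (multiplier=500.0 above) and should be checked against the HBase version in use.

    // Sketch only: applies the tuning suggested by the balancer log message.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Lower the threshold so smaller imbalances trigger a balance plan
        // (the log shows the default of 1.0 rejecting an imbalance of 0.0).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Or raise a specific cost function's relative weight, e.g. region count skew.
        // Key name assumed from the usual hbase.master.balancer.stochastic.* pattern.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        return conf;
      }
    }

Either change makes the balancer more aggressive; in this test run every per-function imbalance is already 0.0, so neither would produce moves here.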
2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,793 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
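A plausible reading of the functionCost= list above is that "weighted average imbalance" is the multiplier-weighted mean of the per-function imbalances, with functions reported as "(not needed)" skipped. The sketch below reproduces the comparison the log reports under that assumption; since every imbalance in this run is 0.0, the result is 0.0 <= 1.0 regardless of the exact weighting, which is why the plan is skipped.

    // Illustrative only: assumed weighting, numbers copied from the functionCost= line.
    public class WeightedImbalanceSketch {
      static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
        double weighted = 0.0, total = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
          weighted += multipliers[i] * imbalances[i];
          total += multipliers[i];
        }
        return total == 0.0 ? 0.0 : weighted / total;
      }

      public static void main(String[] args) {
        // RegionCountSkew, Move, RackLocality, TableSkew, ReadRequest, WriteRequest,
        // MemStoreSize, StoreFile multipliers and imbalances as printed above.
        double[] m  = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] im = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
        double minCostNeedBalance = 1.0; // threshold(1.0) in the log
        System.out.println(weightedAverageImbalance(m, im) <= minCostNeedBalance); // true -> skip
      }
    }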
2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,795 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,797 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
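The cluster-state entries above (the "Hosts are {...}" map, the per-server "is on host" / "is on rack" lines, and the "Number of tables/hosts/racks" summary) show the balancer indexing every region server by host and rack before it costs any moves: in this test each srvNNN name is its own host, and all ten hosts sit on the single rack named "rack". The Java sketch below reproduces that indexing for the server names printed above; it is a simplified illustration, not HBase's BalancerClusterState implementation, and the class and variable names are assumptions.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative sketch of the host/rack indexing the log reports (not HBase code).
public class ClusterIndexSketch {
  public static void main(String[] args) {
    // Server names taken from the "Hosts are {...}" entries above, in index order 0..9.
    List<String> servers = List.of(
        "srv1039859473", "srv1201055376", "srv1338452876", "srv1354186412",
        "srv1490237145", "srv1514872983", "srv172357223", "srv1917535759",
        "srv248070984", "srv334966039");

    Map<String, Integer> hostIndex = new HashMap<>();
    Map<String, Integer> rackIndex = new HashMap<>();

    for (int server = 0; server < servers.size(); server++) {
      String host = servers.get(server); // every server is its own host in this test
      String rack = "rack";              // single rack, as printed in the log
      hostIndex.putIfAbsent(host, hostIndex.size());
      rackIndex.putIfAbsent(rack, rackIndex.size());
      System.out.println("server " + server + " is on host " + hostIndex.get(host));
      System.out.println("server " + server + " is on rack " + rackIndex.get(rack));
    }
    System.out.println("number of hosts=" + hostIndex.size()
        + ", number of racks=" + rackIndex.size());
  }
}

With one server per host and a single rack there is nothing for the rack-oriented cost functions to distinguish, which is consistent with the imbalance=0.0 and "(not needed)" values in the functionCost entries.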
2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,799 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:30,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
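Each "skipping load balancing" entry above compares a weighted average imbalance of 0.0 against the threshold of 1.0 and lists the multiplier and imbalance of every active cost function. The log does not print the formula, but the reported value is consistent with a multiplier-weighted mean, sum(multiplier_i * imbalance_i) / sum(multiplier_i), taken over the functions not marked "(not needed)". The sketch below recomputes that number from the figures in the functionCost entries; it is an illustration under that assumed formula, not the StochasticLoadBalancer source.

// Recomputes the weighted average imbalance the log prints (assumed formula).
public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    // {multiplier, imbalance} pairs from the functionCost=... entries above;
    // functions reported as "(not needed)" are omitted.
    double[][] costFunctions = {
        {500.0, 0.0}, // RegionCountSkewCostFunction
        {7.0, 0.0},   // MoveCostFunction
        {15.0, 0.0},  // RackLocalityCostFunction
        {35.0, 0.0},  // TableSkewCostFunction
        {5.0, 0.0},   // ReadRequestCostFunction
        {5.0, 0.0},   // WriteRequestCostFunction
        {5.0, 0.0},   // MemStoreSizeCostFunction
        {5.0, 0.0},   // StoreFileCostFunction
    };
    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (double[] f : costFunctions) {
      weightedSum += f[0] * f[1];
      multiplierSum += f[0];
    }
    double weightedAverage = multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
    double minCostNeedBalance = 1.0; // threshold(1.0) printed in the log
    System.out.printf("weighted average imbalance=%.1f <= threshold(%.1f) -> skip=%b%n",
        weightedAverage, minCostNeedBalance, weightedAverage <= minCostNeedBalance);
  }
}

Because every per-function imbalance is 0.0, the weighted average is 0.0 regardless of the multipliers, so the balancer declines to generate a plan for each table.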
2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,800 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:30,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
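The skip messages above name two ways to make balancing more aggressive: lower hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this test run) or raise the multiplier of the cost function you care about. A minimal configuration sketch follows; the minCostNeedBalance key is quoted verbatim from the log, while the regionCountCost key is an assumption inferred from the RegionCountSkewCostFunction multiplier of 500.0 and should be verified against the HBase release in use. In a deployment these properties would normally be set in the master's hbase-site.xml rather than in code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the tuning the log message suggests; values are examples, not recommendations.
public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Key named verbatim in the log; lowering it lets smaller imbalances trigger a plan.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Assumed property name for the RegionCountSkewCostFunction multiplier (500.0 above);
    // check the exact key for your HBase version before relying on it.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}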
2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,802 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
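
The BalancerClusterState records above ("Hosts are {...} racks are {rack=0}", "server N is on host N", "server N is on rack 0") describe the index mapping the balancer builds before costing a table: each region server gets a host index and a rack index, and in this test there is one server per host and a single rack. The following is an illustrative stand-alone sketch of that mapping only, assuming a toy class name (ClusterStateSketch) and using the host names and indices printed in the record above; it is not the real org.apache.hadoop.hbase BalancerClusterState code.

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Illustrative only: a toy stand-in for the server/host/rack indexing logged above.
    final class ClusterStateSketch {
        public static void main(String[] args) {
            // Host-to-index mapping as printed in the "Hosts are {...}" record
            // (srv1039859473=0, srv1201055376=1, ..., srv334966039=9).
            String[] hostsByIndex = {
                "srv1039859473", "srv1201055376", "srv1338452876", "srv1354186412", "srv1490237145",
                "srv1514872983", "srv172357223", "srv1917535759", "srv248070984", "srv334966039"};
            Map<String, Integer> hostIndex = new LinkedHashMap<>();
            for (int i = 0; i < hostsByIndex.length; i++) {
                hostIndex.put(hostsByIndex[i], i);
            }
            // One region server per host and a single rack ("racks are {rack=0}"),
            // so server i lands on host i and rack 0, matching the records above.
            for (int server = 0; server < hostsByIndex.length; server++) {
                int host = hostIndex.get(hostsByIndex[server]);
                int rack = 0;
                System.out.printf("server %d is on host %d, rack %d%n", server, host, rack);
            }
            System.out.printf("Number of tables=1, number of hosts=%d, number of racks=1%n",
                hostsByIndex.length);
        }
    }
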
2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,804 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
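
The recurring StochasticLoadBalancer(421) messages above report a "weighted average imbalance" and compare it to hbase.master.balancer.stochastic.minCostNeedBalance (threshold 1.0 in this run): each cost function contributes its imbalance scaled by its multiplier, and plan generation is skipped when the weighted mean does not exceed the threshold. Below is a minimal sketch of that arithmetic as the message describes it, using the multipliers and imbalances from the functionCost details above; the class and method names are invented for the example and this is not the actual HBase implementation.

    import java.util.List;

    // Illustrative only: mirrors the arithmetic described in the
    // StochasticLoadBalancer(421) messages above, not the real HBase code.
    final class BalanceDecisionSketch {

        static final class CostTerm {
            final String name;
            final double multiplier;
            final double imbalance;
            CostTerm(String name, double multiplier, double imbalance) {
                this.name = name;
                this.multiplier = multiplier;
                this.imbalance = imbalance;
            }
        }

        // weighted average imbalance = sum(multiplier_i * imbalance_i) / sum(multiplier_i)
        static double weightedAverageImbalance(List<CostTerm> terms) {
            double weighted = 0.0;
            double totalWeight = 0.0;
            for (CostTerm t : terms) {
                weighted += t.multiplier * t.imbalance;
                totalWeight += t.multiplier;
            }
            return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
        }

        public static void main(String[] args) {
            // Multipliers and imbalances as reported in the functionCost=... details above.
            List<CostTerm> terms = List.of(
                new CostTerm("RegionCountSkewCostFunction", 500.0, 0.0),
                new CostTerm("MoveCostFunction", 7.0, 0.0),
                new CostTerm("RackLocalityCostFunction", 15.0, 0.0),
                new CostTerm("TableSkewCostFunction", 35.0, 0.0),
                new CostTerm("ReadRequestCostFunction", 5.0, 0.0),
                new CostTerm("WriteRequestCostFunction", 5.0, 0.0),
                new CostTerm("MemStoreSizeCostFunction", 5.0, 0.0),
                new CostTerm("StoreFileCostFunction", 5.0, 0.0));
            double minCostNeedBalance = 1.0; // threshold(1.0) in this test run
            double imbalance = weightedAverageImbalance(terms);
            // 0.0 <= 1.0, so the balancer skips plan generation, exactly as logged above.
            System.out.println("weighted average imbalance=" + imbalance
                + ", needsBalance=" + (imbalance > minCostNeedBalance));
        }
    }
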
2024-11-11T04:23:30,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,806 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1490237145=4, srv248070984=8, srv1514872983=5, srv1039859473=0, srv334966039=9, srv1338452876=2, srv1354186412=3, srv1201055376=1, srv172357223=6, srv1917535759=7} racks are {rack=0} 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1379176229=2, srv416347672=7, srv175154119=4, srv1875202767=5, srv229395848=6, srv1172770705=0, srv909940522=8, srv1707820680=3, srv981747268=9, srv1358126463=1} racks are {rack=0} 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
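
The same log message names hbase.master.balancer.stochastic.minCostNeedBalance as the knob to lower for more aggressive balancing. A minimal sketch of setting it on an HBase Configuration follows; the property name is copied from the log message, but the 0.05f value is only a placeholder, and in a real deployment the setting would normally go into hbase-site.xml on the master (and take effect when the master reads it) rather than being set in code like this.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch only: shows where the threshold from the log message lives as a configuration key.
    public final class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Property name taken from the log message above; 0.05f is an arbitrary example value.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }
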
2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1379176229=2, srv416347672=7, srv175154119=4, srv1875202767=5, srv229395848=6, srv1172770705=0, srv909940522=8, srv1707820680=3, srv981747268=9, srv1358126463=1} racks are {rack=0} 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,808 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1379176229=2, srv416347672=7, srv175154119=4, srv1875202767=5, srv229395848=6, srv1172770705=0, srv909940522=8, srv1707820680=3, srv981747268=9, srv1358126463=1} racks are {rack=0} 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1379176229=2, srv416347672=7, srv175154119=4, srv1875202767=5, srv229395848=6, srv1172770705=0, srv909940522=8, srv1707820680=3, srv981747268=9, srv1358126463=1} racks are {rack=0} 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,808 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1379176229=2, srv416347672=7, srv175154119=4, srv1875202767=5, srv229395848=6, srv1172770705=0, srv909940522=8, srv1707820680=3, srv981747268=9, srv1358126463=1} racks are {rack=0} 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1379176229=2, srv416347672=7, srv175154119=4, srv1875202767=5, srv229395848=6, srv1172770705=0, srv909940522=8, srv1707820680=3, srv981747268=9, srv1358126463=1} racks are {rack=0} 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
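
The StochasticLoadBalancer(421) entries repeated throughout this section all report the same decision: a weighted average of the per-cost-function imbalances (each functionCost item is a multiplier/imbalance pair) is compared against hbase.master.balancer.stochastic.minCostNeedBalance, which is 1.0 in this test, and the table is skipped when the average does not exceed that threshold. Below is a minimal Java sketch of that comparison only, assuming the weighted average is sum(multiplier * imbalance) / sum(multiplier); it illustrates the logged decision, not the HBase implementation, and the CostEntry/shouldSkipBalancing names are hypothetical.

import java.util.List;

// Illustrative only: mirrors the "skipping load balancing because weighted average
// imbalance=0.0 <= threshold(1.0)" entries in this log; not HBase code.
public class BalanceDecisionSketch {

    // One functionCost item from the log, e.g. RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0).
    record CostEntry(String name, double multiplier, double imbalance) {}

    // Assumed formula: weighted average imbalance = sum(multiplier_i * imbalance_i) / sum(multiplier_i).
    static double weightedAverageImbalance(List<CostEntry> costs) {
        double weighted = 0.0;
        double totalWeight = 0.0;
        for (CostEntry c : costs) {
            weighted += c.multiplier() * c.imbalance();
            totalWeight += c.multiplier();
        }
        return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
    }

    // Skip balancing when the weighted average does not exceed minCostNeedBalance (1.0 in this test run).
    static boolean shouldSkipBalancing(List<CostEntry> costs, double minCostNeedBalance) {
        return weightedAverageImbalance(costs) <= minCostNeedBalance;
    }

    public static void main(String[] args) {
        List<CostEntry> costs = List.of(
            new CostEntry("RegionCountSkewCostFunction", 500.0, 0.0),
            new CostEntry("MoveCostFunction", 7.0, 0.0),
            new CostEntry("RackLocalityCostFunction", 15.0, 0.0),
            new CostEntry("TableSkewCostFunction", 35.0, 0.0),
            new CostEntry("ReadRequestCostFunction", 5.0, 0.0),
            new CostEntry("WriteRequestCostFunction", 5.0, 0.0),
            new CostEntry("MemStoreSizeCostFunction", 5.0, 0.0),
            new CostEntry("StoreFileCostFunction", 5.0, 0.0));
        System.out.println(shouldSkipBalancing(costs, 1.0)); // prints true, matching the skipped tables in this log
    }
}
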
2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:30,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1379176229=2, srv416347672=7, srv175154119=4, srv1875202767=5, srv229395848=6, srv1172770705=0, srv909940522=8, srv1707820680=3, srv981747268=9, srv1358126463=1} racks are {rack=0} 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,810 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1379176229=2, srv416347672=7, srv175154119=4, srv1875202767=5, srv229395848=6, srv1172770705=0, srv909940522=8, srv1707820680=3, srv981747268=9, srv1358126463=1} racks are {rack=0} 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1379176229=2, srv416347672=7, srv175154119=4, srv1875202767=5, srv229395848=6, srv1172770705=0, srv909940522=8, srv1707820680=3, srv981747268=9, srv1358126463=1} racks are {rack=0} 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1379176229=2, srv416347672=7, srv175154119=4, srv1875202767=5, srv229395848=6, srv1172770705=0, srv909940522=8, srv1707820680=3, srv981747268=9, srv1358126463=1} racks are {rack=0} 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1379176229=2, srv416347672=7, srv175154119=4, srv1875202767=5, srv229395848=6, srv1172770705=0, srv909940522=8, srv1707820680=3, srv981747268=9, srv1358126463=1} racks are {rack=0} 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
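
Each skip entry also names the two tuning knobs for more aggressive balancing: lowering hbase.master.balancer.stochastic.minCostNeedBalance (the only key spelled out in the log itself) or raising the multiplier of a specific cost function. A minimal sketch of setting such overrides on an HBase Configuration follows; the multiplier key hbase.master.balancer.stochastic.regionCountCost is an assumption inferred from the 500.0 default shown for RegionCountSkewCostFunction and should be verified against the HBase release in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the tuning suggested by the "If you want more aggressive balancing ..." entries.
// Only hbase.master.balancer.stochastic.minCostNeedBalance appears verbatim in the log;
// the multiplier key below is an assumed name.
public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Option 1: lower the skip threshold (this test runs with 1.0).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Option 2: increase the relative weight of one cost function
        // (assumed key; 500.0 matches the RegionCountSkewCostFunction multiplier logged above).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println(conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}

In practice these keys would normally be set in hbase-site.xml on the master rather than programmatically; the code form is used here only to keep the example self-contained.
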
2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1379176229=2, srv416347672=7, srv175154119=4, srv1875202767=5, srv229395848=6, srv1172770705=0, srv909940522=8, srv1707820680=3, srv981747268=9, srv1358126463=1} racks are {rack=0} 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,811 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1379176229=2, srv416347672=7, srv175154119=4, srv1875202767=5, srv229395848=6, srv1172770705=0, srv909940522=8, srv1707820680=3, srv981747268=9, srv1358126463=1} racks are {rack=0} 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1379176229=2, srv416347672=7, srv175154119=4, srv1875202767=5, srv229395848=6, srv1172770705=0, srv909940522=8, srv1707820680=3, srv981747268=9, srv1358126463=1} racks are {rack=0} 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1379176229=2, srv416347672=7, srv175154119=4, srv1875202767=5, srv229395848=6, srv1172770705=0, srv909940522=8, srv1707820680=3, srv981747268=9, srv1358126463=1} racks are {rack=0} 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1379176229=2, srv416347672=7, srv175154119=4, srv1875202767=5, srv229395848=6, srv1172770705=0, srv909940522=8, srv1707820680=3, srv981747268=9, srv1358126463=1} racks are {rack=0} 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
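
The BalancerClusterState(202), (303) and (314) entries that precede every plan are the same indexing step each time: server names are assigned integer indexes, each server maps to its own host index, and with a single configured rack every host maps to rack 0 (hence "number of hosts=10, number of racks=1"). A small illustrative sketch of that mapping follows; it assumes nothing about the real BalancerClusterState API, and the class and variable names are hypothetical.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustrative index mapping in the spirit of the "Hosts are {...} racks are {rack=0}",
// "server N is on host N" and "server N is on rack 0" entries. Hypothetical names only.
public class ClusterIndexSketch {
    public static void main(String[] args) {
        List<String> servers = List.of("srv1379176229", "srv416347672", "srv175154119");

        Map<String, Integer> hostIndexByServer = new LinkedHashMap<>(); // server name -> host index
        int[] serverToHost = new int[servers.size()];
        int[] serverToRack = new int[servers.size()];

        for (int i = 0; i < servers.size(); i++) {
            hostIndexByServer.put(servers.get(i), i); // each server is its own host in this test
            serverToHost[i] = i;
            serverToRack[i] = 0;                      // only one rack is configured
        }

        System.out.println("Hosts are " + hostIndexByServer + " racks are {rack=0}");
        for (int i = 0; i < servers.size(); i++) {
            System.out.println("server " + i + " is on host " + serverToHost[i]
                + " and on rack " + serverToRack[i]);
        }
    }
}
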
2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1282003719=1, srv28115420=6, srv1012555978=0, srv1890217830=3, srv667373034=9, srv2019539739=4, srv1445553864=2, srv510329504=7, srv26309103=5, srv632424709=8} racks are {rack=0} 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,813 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1282003719=1, srv28115420=6, srv1012555978=0, srv1890217830=3, srv667373034=9, srv2019539739=4, srv1445553864=2, srv510329504=7, srv26309103=5, srv632424709=8} racks are {rack=0} 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1282003719=1, srv28115420=6, srv1012555978=0, srv1890217830=3, srv667373034=9, srv2019539739=4, srv1445553864=2, srv510329504=7, srv26309103=5, srv632424709=8} racks are {rack=0} 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1282003719=1, srv28115420=6, srv1012555978=0, srv1890217830=3, srv667373034=9, srv2019539739=4, srv1445553864=2, srv510329504=7, srv26309103=5, srv632424709=8} racks are {rack=0} 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
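
The BalancerClusterState lines above show how the balancer indexes every RegionServer by host and rack before costing a plan: with ten distinct hosts and a single rack, server i always lands on host i and rack 0, which is why each pass ends with "Number of tables=1, number of hosts=10, number of racks=1". A minimal standalone sketch of that indexing follows; the class and variable names are illustrative assumptions, not the HBase implementation, and the server names are just a few taken from the log.

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    // Illustrative sketch: index servers by host and rack the way the
    // BalancerClusterState log lines describe (one host per server, one shared rack).
    public class ClusterTopologySketch {
      public static void main(String[] args) {
        List<String> servers = List.of("srv1282003719", "srv28115420", "srv1012555978");

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();
        List<Integer> serverToHost = new ArrayList<>();
        List<Integer> serverToRack = new ArrayList<>();

        for (String server : servers) {
          // Assumption: each server lives on its own host and all hosts share one rack,
          // matching the "server N is on host N" / "server N is on rack 0" lines above.
          int host = hostIndex.computeIfAbsent(server, s -> hostIndex.size());
          int rack = rackIndex.computeIfAbsent("rack", r -> rackIndex.size());
          serverToHost.add(host);
          serverToRack.add(rack);
          System.out.printf("server %d is on host %d, rack %d%n",
              serverToHost.size() - 1, host, rack);
        }
        System.out.printf("Number of hosts=%d, number of racks=%d%n",
            hostIndex.size(), rackIndex.size());
      }
    }

Because every host sits in the same rack, there is no rack-level skew to correct, which is consistent with RackLocalityCostFunction reporting imbalance=0.0 in every functionCost summary below.
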
2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1282003719=1, srv28115420=6, srv1012555978=0, srv1890217830=3, srv667373034=9, srv2019539739=4, srv1445553864=2, srv510329504=7, srv26309103=5, srv632424709=8} racks are {rack=0} 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1282003719=1, srv28115420=6, srv1012555978=0, srv1890217830=3, srv667373034=9, srv2019539739=4, srv1445553864=2, srv510329504=7, srv26309103=5, srv632424709=8} racks are {rack=0} 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,815 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1282003719=1, srv28115420=6, srv1012555978=0, srv1890217830=3, srv667373034=9, srv2019539739=4, srv1445553864=2, srv510329504=7, srv26309103=5, srv632424709=8} racks are {rack=0} 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1282003719=1, srv28115420=6, srv1012555978=0, srv1890217830=3, srv667373034=9, srv2019539739=4, srv1445553864=2, srv510329504=7, srv26309103=5, srv632424709=8} racks are {rack=0} 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1699737998=4, srv720736339=8, srv1702172411=5, srv1560977127=3, srv1343234822=2, srv919558882=9, srv1186288394=1, srv563406408=7, srv1084826720=0, srv47637277=6} racks are {rack=0} 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
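
Each "Table specific (...) - skipping load balancing" line above compares a weighted average of the per-cost-function imbalance values against hbase.master.balancer.stochastic.minCostNeedBalance (set to 1.0 in this run). A rough sketch of that comparison follows, assuming the weighted average is the multiplier-weighted mean of the imbalance values printed in functionCost; this mirrors the wording of the log message, not the exact StochasticLoadBalancer source.

    import java.util.Map;

    // Rough sketch of the "weighted average imbalance <= threshold" check that the
    // StochasticLoadBalancer log lines describe. The weighting scheme is an assumption
    // based on the functionCost output, not a copy of the HBase code.
    public class NeedsBalanceSketch {
      public static void main(String[] args) {
        // hbase.master.balancer.stochastic.minCostNeedBalance in this test run
        double minCostNeedBalance = 1.0;

        // multiplier -> imbalance pairs, as printed in the functionCost summary above
        Map<String, double[]> functionCost = Map.of(
            "RegionCountSkewCostFunction", new double[] {500.0, 0.0},
            "MoveCostFunction", new double[] {7.0, 0.0},
            "RackLocalityCostFunction", new double[] {15.0, 0.0},
            "TableSkewCostFunction", new double[] {35.0, 0.0},
            "ReadRequestCostFunction", new double[] {5.0, 0.0});

        double weightedImbalance = 0.0;
        double totalWeight = 0.0;
        for (double[] entry : functionCost.values()) {
          weightedImbalance += entry[0] * entry[1];
          totalWeight += entry[0];
        }
        double weightedAverage = totalWeight == 0.0 ? 0.0 : weightedImbalance / totalWeight;

        if (weightedAverage <= minCostNeedBalance) {
          System.out.printf(
              "skipping load balancing because weighted average imbalance=%.1f <= threshold(%.1f)%n",
              weightedAverage, minCostNeedBalance);
        } else {
          System.out.println("imbalance above threshold, generating a balance plan");
        }
      }
    }

With every reported imbalance at 0.0 the weighted average is 0.0, so the balancer skips plan generation for every table in this run, exactly as logged.
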
2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1699737998=4, srv720736339=8, srv1702172411=5, srv1560977127=3, srv1343234822=2, srv919558882=9, srv1186288394=1, srv563406408=7, srv1084826720=0, srv47637277=6} racks are {rack=0} 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1699737998=4, srv720736339=8, srv1702172411=5, srv1560977127=3, srv1343234822=2, srv919558882=9, srv1186288394=1, srv563406408=7, srv1084826720=0, srv47637277=6} racks are {rack=0} 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,818 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1699737998=4, srv720736339=8, srv1702172411=5, srv1560977127=3, srv1343234822=2, srv919558882=9, srv1186288394=1, srv563406408=7, srv1084826720=0, srv47637277=6} racks are {rack=0} 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1699737998=4, srv720736339=8, srv1702172411=5, srv1560977127=3, srv1343234822=2, srv919558882=9, srv1186288394=1, srv563406408=7, srv1084826720=0, srv47637277=6} racks are {rack=0} 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1699737998=4, srv720736339=8, srv1702172411=5, srv1560977127=3, srv1343234822=2, srv919558882=9, srv1186288394=1, srv563406408=7, srv1084826720=0, srv47637277=6} racks are {rack=0} 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1699737998=4, srv720736339=8, srv1702172411=5, srv1560977127=3, srv1343234822=2, srv919558882=9, srv1186288394=1, srv563406408=7, srv1084826720=0, srv47637277=6} racks are {rack=0} 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
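
The repeated advice in these messages (lower hbase.master.balancer.stochastic.minCostNeedBalance or raise individual cost-function multipliers) is applied through the master's configuration, typically hbase-site.xml. Below is a hedged sketch using the Hadoop Configuration API: the minCostNeedBalance key is taken verbatim from the log, while the regionCountCost key is assumed to be the multiplier property behind RegionCountSkewCostFunction and should be checked against the HBase release in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: tune the stochastic balancer so it balances more aggressively, as the
    // log message suggests. The values shown are illustrative, not recommendations.
    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Lower the threshold below which balancing is skipped
        // (1.0 in this test run, per the log lines above).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Raise the weight of region-count skew relative to the other cost functions.
        // Assumption: this key maps to RegionCountSkewCostFunction's multiplier
        // (500.0 above); verify the property name for your HBase version.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);

        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f));
      }
    }

In a real deployment these keys would normally be set in hbase-site.xml on the master and picked up on restart or balancer reconfiguration rather than set programmatically as shown here.
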
2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1699737998=4, srv720736339=8, srv1702172411=5, srv1560977127=3, srv1343234822=2, srv919558882=9, srv1186288394=1, srv563406408=7, srv1084826720=0, srv47637277=6} racks are {rack=0} 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,820 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1699737998=4, srv720736339=8, srv1702172411=5, srv1560977127=3, srv1343234822=2, srv919558882=9, srv1186288394=1, srv563406408=7, srv1084826720=0, srv47637277=6} racks are {rack=0} 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1903768503=2, srv1944923119=3, srv473543180=6, srv1549467921=1, srv746051342=9, srv301968360=4, srv338804465=5, srv494272569=7, srv1341597184=0, srv506468719=8} racks are {rack=0} 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1903768503=2, srv1944923119=3, srv473543180=6, srv1549467921=1, srv746051342=9, srv301968360=4, srv338804465=5, srv494272569=7, srv1341597184=0, srv506468719=8} racks are {rack=0} 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1903768503=2, srv1944923119=3, srv473543180=6, srv1549467921=1, srv746051342=9, srv301968360=4, srv338804465=5, srv494272569=7, srv1341597184=0, srv506468719=8} racks are {rack=0} 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
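[editor's note] The repeated "skipping load balancing" records above all point at the same knob: the balancer only produces a plan when the weighted average imbalance exceeds hbase.master.balancer.stochastic.minCostNeedBalance (shown here as 1.0). A minimal, hypothetical tuning sketch follows, assuming a standard HBase Configuration; the regionCountCost property name is an assumption inferred from the RegionCountSkewCostFunction multiplier in the log, not something this log confirms.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower the threshold so tables with a small weighted average imbalance
    // are no longer skipped (the log shows the threshold at 1.0 here).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, raise a specific cost multiplier. The property name below
    // is an assumption based on the RegionCountSkewCostFunction entry in the
    // functionCost listing; verify it against your HBase version.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    System.out.println("minCostNeedBalance = "
        + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}
```

In a cluster these properties would normally live in hbase-site.xml on the master rather than being set programmatically; the snippet only illustrates which keys the log message is referring to.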
2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1903768503=2, srv1944923119=3, srv473543180=6, srv1549467921=1, srv746051342=9, srv301968360=4, srv338804465=5, srv494272569=7, srv1341597184=0, srv506468719=8} racks are {rack=0} 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,822 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1903768503=2, srv1944923119=3, srv473543180=6, srv1549467921=1, srv746051342=9, srv301968360=4, srv338804465=5, srv494272569=7, srv1341597184=0, srv506468719=8} racks are {rack=0} 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1903768503=2, srv1944923119=3, srv473543180=6, srv1549467921=1, srv746051342=9, srv301968360=4, srv338804465=5, srv494272569=7, srv1341597184=0, srv506468719=8} racks are {rack=0} 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1903768503=2, srv1944923119=3, srv473543180=6, srv1549467921=1, srv746051342=9, srv301968360=4, srv338804465=5, srv494272569=7, srv1341597184=0, srv506468719=8} racks are {rack=0} 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1903768503=2, srv1944923119=3, srv473543180=6, srv1549467921=1, srv746051342=9, srv301968360=4, srv338804465=5, srv494272569=7, srv1341597184=0, srv506468719=8} racks are {rack=0} 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
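[editor's note] Each functionCost listing pairs a cost function with a multiplier and an imbalance, and functions marked "(not needed)" are excluded. A plausible way to reproduce the logged "weighted average imbalance" figure is a multiplier-weighted mean over the active functions; the sketch below works under that assumption and is not the balancer's actual code. The values are copied from the table9 record above.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    // (multiplier, imbalance) pairs from the functionCost line; the
    // "(not needed)" functions are skipped, mirroring the log output.
    Map<String, double[]> costs = new LinkedHashMap<>();
    costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    costs.put("MoveCostFunction",            new double[] {7.0, 0.0});
    costs.put("RackLocalityCostFunction",    new double[] {15.0, 0.0});
    costs.put("TableSkewCostFunction",       new double[] {35.0, 0.0});
    costs.put("ReadRequestCostFunction",     new double[] {5.0, 0.0});
    costs.put("WriteRequestCostFunction",    new double[] {5.0, 0.0});
    costs.put("MemStoreSizeCostFunction",    new double[] {5.0, 0.0});
    costs.put("StoreFileCostFunction",       new double[] {5.0, 0.0});

    double weightedSum = 0.0, weightTotal = 0.0;
    for (double[] mi : costs.values()) {
      weightedSum += mi[0] * mi[1]; // multiplier * imbalance
      weightTotal += mi[0];
    }
    double weightedAverage = weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;

    // With every imbalance at 0.0 this prints 0.0, which is <= the 1.0
    // threshold, hence the "skipping load balancing" messages for each table.
    System.out.println("weighted average imbalance = " + weightedAverage);
  }
}
```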
2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1903768503=2, srv1944923119=3, srv473543180=6, srv1549467921=1, srv746051342=9, srv301968360=4, srv338804465=5, srv494272569=7, srv1341597184=0, srv506468719=8} racks are {rack=0} 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,824 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1903768503=2, srv1944923119=3, srv473543180=6, srv1549467921=1, srv746051342=9, srv301968360=4, srv338804465=5, srv494272569=7, srv1341597184=0, srv506468719=8} racks are {rack=0} 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-11T04:23:30,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,827 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-11T04:23:30,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,829 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,831 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,832 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-11T04:23:30,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
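The skip messages above report a weighted average imbalance of 0.0 against a threshold of 1.0. Below is a minimal sketch of that arithmetic, built only from the multiplier/imbalance pairs printed in the functionCost listing; it is an illustration of the weighting, not the balancer's actual code, and the cost functions marked "(not needed)" are simply left out.

// Sketch: reproduce the "weighted average imbalance" from the functionCost entries above.
public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        // multiplier / imbalance pairs as printed in the log line above
        double[][] costs = {
            {500.0, 0.0}, // RegionCountSkewCostFunction
            {7.0,   0.0}, // MoveCostFunction
            {15.0,  0.0}, // RackLocalityCostFunction
            {35.0,  0.0}, // TableSkewCostFunction
            {5.0,   0.0}, // ReadRequestCostFunction
            {5.0,   0.0}, // WriteRequestCostFunction
            {5.0,   0.0}, // MemStoreSizeCostFunction
            {5.0,   0.0}  // StoreFileCostFunction
        };
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] c : costs) {
            weightedSum += c[0] * c[1];   // multiplier * imbalance
            multiplierSum += c[0];
        }
        // 0.0 / 577.0 = 0.0, which is <= the 1.0 threshold, so the table is skipped.
        System.out.println("weighted average imbalance = " + (weightedSum / multiplierSum));
    }
}

Because every imbalance is 0.0 in this run, the weighted average is 0.0 regardless of the multipliers, which is why every per-table plan below is skipped as well.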
2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,834 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-11T04:23:30,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
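The BalancerClusterState lines above assign each distinct hostname the next free integer index and, since no rack topology is configured, place every server on the single rack index 0. The following is a small illustrative sketch of that indexing, not the HBase implementation, using only a hypothetical subset of the server names from the Hosts map above.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ClusterIndexSketch {
    public static void main(String[] args) {
        // Subset of the server names shown in the "Hosts are {...}" line (order is illustrative).
        List<String> servers = List.of("srv1459556912", "srv1558245847", "srv1655253629");
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        for (String server : servers) {
            if (!hostIndex.containsKey(server)) {
                hostIndex.put(server, hostIndex.size()); // next free index
            }
        }
        // One host per server and a single rack means server N ends up on host N
        // and rack 0, matching the "server N is on host N" / "server N is on rack 0" lines.
        hostIndex.forEach((host, idx) -> System.out.println(host + " -> host " + idx + ", rack 0"));
    }
}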
2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
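As the skip message suggests, more aggressive balancing means either lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising the multiplier of the cost function you care about. Below is a minimal sketch of applying such settings through the HBase Configuration API; in a real deployment they would normally be set in hbase-site.xml on the master, and the regionCountCost key and the chosen values are assumptions to verify against your HBase version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower the skip threshold so smaller weighted-average imbalances still trigger balancing
        // (the property name is taken from the log message above; the value is illustrative).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Alternatively, weight region-count skew more heavily (assumed key; the default
        // multiplier of 500.0 matches the functionCost output above).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}

Note that raising a multiplier only changes the weighting: with every imbalance at 0.0, as in this run, the weighted average stays 0.0 and the tables are still skipped.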
2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 
2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,836 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 
2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
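The "Slop is less than zero, not checking for sloppiness" lines indicate that the slop configured for this run is negative, so BaseLoadBalancer skips its sloppiness shortcut and always falls through to the cost-based decision. The slop is conventionally read from hbase.regions.slop; a minimal sketch follows, with the property name taken from standard HBase configuration and the values as illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SlopSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // A negative slop, as apparently used here, disables the sloppiness check entirely.
        conf.setFloat("hbase.regions.slop", -1.0f);
        // A positive value would instead let the balancer treat region servers whose
        // region counts stay within roughly that fraction of the average as balanced.
        System.out.println("hbase.regions.slop = " + conf.get("hbase.regions.slop"));
    }
}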
2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,838 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,840 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,842 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,843 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
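
The skip decision reported in the StochasticLoadBalancer(421) entries above can be reproduced by hand from the functionCost list. The following is a minimal illustrative sketch, assuming the reported "weighted average imbalance" is the multiplier-weighted mean of the per-function imbalances printed in the log; the class and field names here are hypothetical and are not HBase internals.

    // Hypothetical illustration of the "weighted average imbalance <= threshold" check
    // that the StochasticLoadBalancer log entries above report. Not HBase source code.
    public final class BalancerSkipCheckSketch {

        // (multiplier, imbalance) pairs as printed in the functionCost= list.
        // Functions reported as "(not needed)" are left out.
        private static final double[][] FUNCTION_COSTS = {
            {500.0, 0.0}, // RegionCountSkewCostFunction
            {7.0,   0.0}, // MoveCostFunction
            {15.0,  0.0}, // RackLocalityCostFunction
            {35.0,  0.0}, // TableSkewCostFunction
            {5.0,   0.0}, // ReadRequestCostFunction
            {5.0,   0.0}, // WriteRequestCostFunction
            {5.0,   0.0}, // MemStoreSizeCostFunction
            {5.0,   0.0}, // StoreFileCostFunction
        };

        public static void main(String[] args) {
            double threshold = 1.0; // threshold(1.0) from the log, i.e. minCostNeedBalance
            double weightedSum = 0.0;
            double multiplierSum = 0.0;
            for (double[] fc : FUNCTION_COSTS) {
                weightedSum += fc[0] * fc[1];   // multiplier * imbalance
                multiplierSum += fc[0];
            }
            double weightedAverageImbalance = multiplierSum == 0 ? 0.0 : weightedSum / multiplierSum;
            boolean skip = weightedAverageImbalance <= threshold;
            System.out.printf("weighted average imbalance=%.1f <= threshold(%.1f) -> skip=%b%n",
                weightedAverageImbalance, threshold, skip);
        }
    }

Because every imbalance in this run is 0.0, the weighted average is 0.0 regardless of the multipliers, which is why every per-table plan in the log is skipped against the 1.0 threshold.
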
2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,846 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
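
The same log message names the two tuning knobs for more aggressive balancing. Below is a minimal sketch of the first suggestion, lowering hbase.master.balancer.stochastic.minCostNeedBalance (the key is taken verbatim from the message), set programmatically through the standard Hadoop/HBase Configuration API; in a live cluster the key would normally go into hbase-site.xml on the master instead, and 0.05 is an arbitrary example value.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch of the tuning suggested by the StochasticLoadBalancer message above:
    // lower minCostNeedBalance below the reported 1.0 threshold so that smaller
    // weighted-average imbalances still trigger a balance plan.
    public final class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Key copied from the log message; 0.05f is only an example value.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            System.out.println("minCostNeedBalance = "
                + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
            // The alternative the message mentions - raising the multiplier of a specific
            // cost function - is done through that function's own
            // hbase.master.balancer.stochastic.* multiplier property.
        }
    }
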
2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,847 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
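
The repeated BalancerClusterState(202), (303) and (314) entries encode a simple index mapping for this test cluster: each of the ten servers is its own host and all of them share the single rack "rack", so the host index equals the server index and every rack index is 0. A toy sketch of that mapping follows (illustrative only; the class name is hypothetical, while the server names and indices are taken from the Hosts map in the log).

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Toy reconstruction of the mapping described by the repeated
    // "server N is on host N" / "server N is on rack 0" DEBUG lines.
    public final class ClusterStateMappingSketch {
        public static void main(String[] args) {
            String[] servers = {
                "srv1459556912", "srv1558245847", "srv1655253629", "srv1678883688",
                "srv1932912901", "srv1971835389", "srv2046869629", "srv473618314",
                "srv778424909", "srv952607215"
            };
            Map<String, Integer> hostIndex = new LinkedHashMap<>();
            for (int server = 0; server < servers.length; server++) {
                // One host per server name, so the host index equals the server index.
                hostIndex.put(servers[server], server);
                // Only one rack ("rack") exists, so every server maps to rack 0.
                int rack = 0;
                System.out.printf("server %d is on host %d, rack %d%n", server, server, rack);
            }
            System.out.println("Number of hosts=" + hostIndex.size() + ", number of racks=1");
        }
    }
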
2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,849 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,851 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,853 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,855 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
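[annotation] Every table in this run is skipped because the weighted average imbalance (0.0) never exceeds the threshold read from hbase.master.balancer.stochastic.minCostNeedBalance (1.0), exactly as the message above suggests. Below is a minimal sketch of how that threshold could be lowered (or a single cost multiplier raised) on the HBase Configuration before the balancer is initialized. The property name hbase.master.balancer.stochastic.minCostNeedBalance is taken verbatim from the log; the 0.05 value and the regionCountCost key are illustrative assumptions, not something this log confirms.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        // Start from the usual HBase configuration (hbase-site.xml on the classpath).
        Configuration conf = HBaseConfiguration.create();

        // Property name copied from the log message: lowering it below the observed
        // imbalance makes the stochastic balancer actually generate a plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Alternative route hinted at in the log: raise the relative weight of one cost
        // function. This key is an assumption (believed to control
        // RegionCountSkewCostFunction, whose default multiplier of 500 matches the log).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}

In a test run like this one, the same properties would have to be set on the Configuration handed to the balancer before the per-table balance plans above are generated.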
2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,856 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
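[annotation] The "weighted average imbalance=0.0 <= threshold(1.0)" decision can be reproduced from the functionCost breakdown that accompanies each skip message. The sketch below assumes the reported figure is simply the multiplier-weighted mean of the per-function imbalance values, with the functions marked "(not needed)" excluded; that reading is consistent with every number in this log but is an interpretation, not a quote of the HBase implementation.

import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        // multiplier / imbalance pairs copied from the functionCost line above;
        // "(not needed)" functions are simply left out of the map.
        Map<String, double[]> functionCost = new LinkedHashMap<>();
        functionCost.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        functionCost.put("MoveCostFunction",            new double[] {7.0,   0.0});
        functionCost.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        functionCost.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        functionCost.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        functionCost.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        functionCost.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        functionCost.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] entry : functionCost.values()) {
            weightedSum += entry[0] * entry[1];   // multiplier * imbalance
            multiplierSum += entry[0];
        }
        double weightedAverageImbalance = multiplierSum == 0 ? 0.0 : weightedSum / multiplierSum;

        double threshold = 1.0;  // hbase.master.balancer.stochastic.minCostNeedBalance
        System.out.printf("weighted average imbalance=%.1f <= threshold(%.1f): skip=%b%n",
            weightedAverageImbalance, threshold, weightedAverageImbalance <= threshold);
    }
}

With every imbalance at 0.0 the weighted mean is 0.0, so the comparison against 1.0 always leads to the skip seen for table40 through table56 in this section.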
2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,858 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
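[annotation] The repeated "server N is on host N" / "server N is on rack 0" lines are BalancerClusterState assigning each region server an integer host index and rack index; with ten distinct hosts and a single rack the mapping is trivial, which is why the counts always come out as hosts=10, racks=1. The sketch below mirrors that indexing under those assumptions (server names and their indices taken from the "Hosts are {...}" line above); it illustrates what the logged indices mean and is not the actual HBase class.

import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterIndexSketch {
    public static void main(String[] args) {
        // Server names ordered by the indices shown in the "Hosts are {...}" line.
        String[] servers = {
            "srv1459556912", "srv1558245847", "srv1655253629", "srv1678883688", "srv1932912901",
            "srv1971835389", "srv2046869629", "srv473618314", "srv778424909", "srv952607215"
        };

        // Each distinct host gets the next integer index; every host sits on the single rack 0.
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        for (String server : servers) {
            hostIndex.putIfAbsent(server, hostIndex.size());
        }

        for (Map.Entry<String, Integer> e : hostIndex.entrySet()) {
            int idx = e.getValue();
            System.out.println("server " + idx + " is on host " + idx + "  (" + e.getKey() + ")");
            System.out.println("server " + idx + " is on rack 0");
        }
        System.out.println("Number of tables=1, number of hosts=" + hostIndex.size()
            + ", number of racks=1");
    }
}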
2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,860 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
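Note on the skip decision logged above: for each table the balancer prints every cost function's multiplier and imbalance, then compares a weighted average of those imbalances against hbase.master.balancer.stochastic.minCostNeedBalance (threshold 1.0 here). Below is a minimal sketch of the arithmetic implied by the message, assuming the weighted average is sum(multiplier * imbalance) / sum(multiplier) over the functions not reported as "(not needed)"; the class and values are illustrative, not HBase code.

public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    // {multiplier, imbalance} pairs copied from the functionCost list in the log;
    // functions reported as "(not needed)" are skipped (an assumption).
    double[][] multiplierAndImbalance = {
        {500.0, 0.0}, // RegionCountSkewCostFunction
        {7.0, 0.0},   // MoveCostFunction
        {15.0, 0.0},  // RackLocalityCostFunction
        {35.0, 0.0},  // TableSkewCostFunction
        {5.0, 0.0},   // ReadRequestCostFunction
        {5.0, 0.0},   // WriteRequestCostFunction
        {5.0, 0.0},   // MemStoreSizeCostFunction
        {5.0, 0.0},   // StoreFileCostFunction
    };
    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (double[] mi : multiplierAndImbalance) {
      weightedSum += mi[0] * mi[1];
      multiplierSum += mi[0];
    }
    // Evaluates to 0.0 here, which is <= threshold(1.0), so the balancer skips the table.
    System.out.printf("weighted average imbalance = %.1f%n", weightedSum / multiplierSum);
  }
}

With every imbalance at 0.0, each "skipping load balancing" record above is the expected outcome rather than an error.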
2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,862 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
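The tuning hint repeated in each skip message above names two levers: lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising individual cost-function multipliers. The following is a hedged sketch of setting both programmatically on a Hadoop Configuration (in practice these typically live in hbase-site.xml on the master). The minCostNeedBalance key is taken from the log message itself; hbase.master.balancer.stochastic.regionCountCost is assumed to be the multiplier key behind RegionCountSkewCostFunction (multiplier=500.0 above) and should be verified against your HBase version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Lower the skip threshold so smaller weighted-average imbalances still trigger a plan.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Or weight one cost function more heavily (key assumed; multiplier logged above is 500.0).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    System.out.println("minCostNeedBalance = "
        + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}

Lowering the threshold only changes when a plan is attempted; the cost functions still decide what any generated plan would move.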
2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-11T04:23:30,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 
2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 
2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-11T04:23:30,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
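For reference, the repeated BalancerClusterState records above describe a deliberately flat topology: ten distinct srv* hosts, a single rack, and one table per plan. An illustrative sketch of the indexing the log prints (not BalancerClusterState itself): each server gets its own host index and rack index 0, which is why every summary line reads "Number of tables=1, number of hosts=10, number of racks=1".

import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterTopologySketch {
  public static void main(String[] args) {
    // Server names and host indexes copied from the "Hosts are {...}" records above.
    String[] servers = {"srv1459556912", "srv1558245847", "srv1655253629", "srv1678883688",
        "srv1932912901", "srv1971835389", "srv2046869629", "srv473618314",
        "srv778424909", "srv952607215"};
    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    Map<String, Integer> rackIndex = new LinkedHashMap<>();
    for (String server : servers) {
      hostIndex.put(server, hostIndex.size()); // "server i is on host i"
      rackIndex.put(server, 0);                // "server i is on rack 0" -- single rack
    }
    long racks = rackIndex.values().stream().distinct().count();
    System.out.println("number of hosts=" + hostIndex.size() + ", number of racks=" + racks);
  }
}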
2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,866 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
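[editor's note] The repeated message above tells the operator how to make the balancer act sooner: lower hbase.master.balancer.stochastic.minCostNeedBalance (shown as 1.0 in this run) or raise the multiplier of a particular cost function. The sketch below shows how such settings could be applied programmatically, assuming they would normally live in the master's hbase-site.xml; the concrete values are illustrative only, and the regionCountCost key is the multiplier commonly associated with RegionCountSkewCostFunction (multiplier=500.0 here), so treat that exact key as an assumption rather than something confirmed by this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Minimal tuning sketch, assuming the settings are picked up by the master at startup.
public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower the "needs balance" threshold the log compares against (1.0 in this run),
    // so smaller weighted-average imbalances still produce a balance plan.
    conf.set("hbase.master.balancer.stochastic.minCostNeedBalance", "0.05");

    // Or increase the relative weight of one cost function. Assumed key for the
    // RegionCountSkewCostFunction multiplier; value is illustrative.
    conf.set("hbase.master.balancer.stochastic.regionCountCost", "1000");

    System.out.println("minCostNeedBalance = "
        + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}
```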
2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,868 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
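[editor's note] The skip decision logged for each table compares a "weighted average imbalance" against the threshold. A toy re-computation is sketched below, assuming that phrase means the multiplier-weighted average of the per-function imbalances listed in the functionCost= dump, with "(not needed)" functions omitted; this is a simplified reading of the message, not HBase's actual implementation.

```java
// Recomputes the decision reported in the log line: 0.0 <= 1.0, so the plan is skipped.
public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    // (multiplier, imbalance) pairs taken from the functionCost= dump above.
    double[][] costs = {
        {500.0, 0.0}, // RegionCountSkewCostFunction
        {7.0, 0.0},   // MoveCostFunction
        {15.0, 0.0},  // RackLocalityCostFunction
        {35.0, 0.0},  // TableSkewCostFunction
        {5.0, 0.0},   // ReadRequestCostFunction
        {5.0, 0.0},   // WriteRequestCostFunction
        {5.0, 0.0},   // MemStoreSizeCostFunction
        {5.0, 0.0}    // StoreFileCostFunction
    };
    double threshold = 1.0; // minCostNeedBalance value reported in this run

    double weighted = 0.0;
    double totalWeight = 0.0;
    for (double[] c : costs) {
      weighted += c[0] * c[1];
      totalWeight += c[0];
    }
    double weightedAverageImbalance = weighted / totalWeight;

    System.out.printf("weighted average imbalance=%.1f <= threshold(%.1f) -> skip=%b%n",
        weightedAverageImbalance, threshold, weightedAverageImbalance <= threshold);
  }
}
```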
2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,869 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1932912901=4, srv1655253629=2, srv473618314=7, srv1971835389=5, srv2046869629=6, srv1558245847=1, srv778424909=8, srv1678883688=3, srv952607215=9, srv1459556912=0} racks are {rack=0} 2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
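[editor's note] Each per-table block begins with BalancerClusterState dumping its indexing: the host map ("Hosts are {srv...=0..9}"), a host index per server, and a single rack. The sketch below only mirrors that bookkeeping for the ten server names appearing in these blocks; it is an illustration of what the DEBUG/INFO lines describe, not the BalancerClusterState implementation itself.

```java
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Rebuilds the server -> host index mapping shown in the log, with every
// server on rack 0 because the test cluster has a single rack.
public class ClusterIndexSketch {
  public static void main(String[] args) {
    List<String> servers = List.of(
        "srv1459556912", "srv1558245847", "srv1655253629", "srv1678883688",
        "srv1932912901", "srv1971835389", "srv2046869629", "srv473618314",
        "srv778424909", "srv952607215");

    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    for (String s : servers) {
      hostIndex.put(s, hostIndex.size()); // one host per server in this test
    }

    for (Map.Entry<String, Integer> e : hostIndex.entrySet()) {
      System.out.println("server " + e.getValue() + " is on host " + e.getValue()
          + ", rack 0"); // single rack, as in "racks are {rack=0}"
    }
    System.out.println("number of hosts=" + hostIndex.size() + ", number of racks=1");
  }
}
```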
2024-11-11T04:23:30,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,873 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,873 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
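The BalancerClusterState lines above map each of the ten servers to a host index and a rack index before any cost is evaluated; with one server per host and a single rack, every server gets its own host index and ends up on rack 0. As a rough illustration only, assuming nothing beyond what the log itself shows (this is not the actual BalancerClusterState code), a minimal Java sketch of building such dense index maps from server names could look like the following; the srv* names and the single "rack" string are taken from the log:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Sketch only: assign each distinct host and rack a dense integer index,
    // mirroring the "server N is on host N" / "server N is on rack 0" lines above.
    public class ClusterIndexSketch {
      public static void main(String[] args) {
        List<String> servers = List.of("srv100840725", "srv1368917282", "srv1406722316");
        Map<String, Integer> hostToIndex = new HashMap<>();
        Map<String, Integer> rackToIndex = new HashMap<>();
        for (int serverIndex = 0; serverIndex < servers.size(); serverIndex++) {
          String host = servers.get(serverIndex); // one server per host in this test
          String rack = "rack";                   // single rack, as in the log
          if (!hostToIndex.containsKey(host)) {
            hostToIndex.put(host, hostToIndex.size());
          }
          if (!rackToIndex.containsKey(rack)) {
            rackToIndex.put(rack, rackToIndex.size());
          }
          System.out.println("server " + serverIndex + " is on host " + hostToIndex.get(host));
          System.out.println("server " + serverIndex + " is on rack " + rackToIndex.get(rack));
        }
        System.out.println("Number of hosts=" + hostToIndex.size() + ", number of racks=" + rackToIndex.size());
      }
    }

Extending the server list to all ten srv* names from the log reproduces the hosts=10 / racks=1 part of the summary printed before each slop check.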
2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,875 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
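Each "skipping load balancing" entry above compares a multiplier-weighted average imbalance against hbase.master.balancer.stochastic.minCostNeedBalance, which is 1.0 in this test. The sketch below is reconstructed from that log message alone, not from the StochasticLoadBalancer source: it weights the imbalance of each enabled cost function by its multiplier and reports that a balance plan is needed only when the weighted average exceeds the threshold.

    // Sketch of the decision described in the log message above (not HBase's code).
    public class NeedsBalanceSketch {
      static boolean needsBalance(double[] multipliers, double[] imbalances, double minCostNeedBalance) {
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
          weightedSum += multipliers[i] * imbalances[i];
          multiplierSum += multipliers[i];
        }
        double weightedAverageImbalance = multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
        return weightedAverageImbalance > minCostNeedBalance;
      }

      public static void main(String[] args) {
        // Multipliers copied from the functionCost= list above (cost functions marked
        // "not needed" are omitted); every imbalance is 0.0 in this run.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances = new double[multipliers.length];
        System.out.println("needsBalance=" + needsBalance(multipliers, imbalances, 1.0)); // prints false
      }
    }

With every imbalance at 0.0, the weighted average is 0.0 <= 1.0, which is why no balance plan is generated for any of these tables.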
2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,877 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
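The message's own advice for more aggressive balancing is to lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or to raise the multipliers of specific cost functions. A minimal, hedged sketch of the first option using the standard HBaseConfiguration/Configuration API follows; the 0.05 value is purely illustrative, not a recommendation, and in a real deployment the property would typically be set in hbase-site.xml on the master rather than programmatically.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: lower the threshold that the stochastic balancer compares the
    // weighted average imbalance against, so plans are generated for smaller imbalances.
    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
      }
    }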
2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,878 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
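Note: the repeated "Hosts are {...} racks are {rack=0}" and "server N is on host N / rack 0" entries describe how the balancer's cluster-state indexing assigns each server an integer host and rack index. The following is a minimal, hypothetical Java sketch of that kind of indexing (class and field names here are illustrative, not HBase's internal BalancerClusterState code); with every server on its own host and a single shared rack, it reproduces the pattern logged in this test.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Hypothetical sketch of the host/rack indexing pattern seen in the log;
// illustrative only, not the actual BalancerClusterState implementation.
public class ClusterIndexSketch {

  // Return the existing index for a name, or assign the next free one.
  static int indexOf(Map<String, Integer> indexes, String name) {
    Integer idx = indexes.get(name);
    if (idx == null) {
      idx = indexes.size();
      indexes.put(name, idx);
    }
    return idx;
  }

  public static void main(String[] args) {
    // In this test every server is its own host and all servers share one rack.
    List<String> servers = List.of("srv100840725", "srv1368917282", "srv1406722316");
    Map<String, Integer> hostIndexes = new LinkedHashMap<>();
    Map<String, Integer> rackIndexes = new LinkedHashMap<>();

    for (int server = 0; server < servers.size(); server++) {
      int host = indexOf(hostIndexes, servers.get(server)); // one host per server
      int rack = indexOf(rackIndexes, "rack");               // single shared rack
      System.out.println("server " + server + " is on host " + host);
      System.out.println("server " + server + " is on rack " + rack);
    }
    System.out.println("Number of hosts=" + hostIndexes.size()
        + ", number of racks=" + rackIndexes.size());
  }
}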
2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-11T04:23:30,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,880 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
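Note: each "skipping load balancing" entry in this run reports weighted average imbalance=0.0 against threshold(1.0), i.e. minCostNeedBalance. The sketch below assumes the weighted average is the multiplier-weighted mean of the per-function imbalances (an assumption based on the wording of the log, not taken from the HBase source) and shows why a table with all imbalances at 0.0 never triggers a balance plan here.

// Sketch of the skip decision as described in the log messages above.
// Assumption: weighted average imbalance = sum(multiplier_i * imbalance_i) / sum(multiplier_i).
public class BalanceDecisionSketch {
  public static void main(String[] args) {
    // Multipliers and imbalances as reported in the functionCost=... entries.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};
    double minCostNeedBalance = 1.0; // threshold(1.0) in the log

    double weighted = 0.0, totalWeight = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      totalWeight += multipliers[i];
    }
    double weightedAverageImbalance = totalWeight == 0.0 ? 0.0 : weighted / totalWeight;

    if (weightedAverageImbalance <= minCostNeedBalance) {
      System.out.println("skipping load balancing because weighted average imbalance="
          + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
    } else {
      System.out.println("would generate a balance plan");
    }
  }
}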
2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,882 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
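Note: the same log message names the two knobs for more aggressive balancing: lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising a cost function's multiplier. Below is a minimal sketch of setting such properties programmatically; minCostNeedBalance is the property named in the log itself, while the multiplier property name (hbase.master.balancer.stochastic.regionCountCost) and the chosen values are assumptions for illustration. In a real deployment these settings would normally go into hbase-site.xml on the master.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the tuning suggested by the log message above. Values are illustrative.
public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower the threshold named in the log so smaller imbalances trigger a plan.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Or weight region-count skew more heavily (property name assumed for illustration).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}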
2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,884 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is 
on rack 0 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
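The functionCost listing above pairs each cost function with a multiplier and an imbalance, and the skip decision compares a "weighted average imbalance" against the threshold. A minimal sketch of that arithmetic follows, assuming the average is multiplier-weighted (which is what the printed pairs suggest) and omitting the "(not needed)" functions; it illustrates the logged numbers only and is not the StochasticLoadBalancer implementation itself.

    // Sketch: multiplier-weighted average imbalance from the logged (multiplier, imbalance) pairs.
    public class WeightedImbalanceSketch {
        public static void main(String[] args) {
            // (multiplier, imbalance) pairs as printed in functionCost; "not needed" ones omitted.
            double[][] functions = {
                {500.0, 0.0}, // RegionCountSkewCostFunction
                {7.0,   0.0}, // MoveCostFunction
                {15.0,  0.0}, // RackLocalityCostFunction
                {35.0,  0.0}, // TableSkewCostFunction
                {5.0,   0.0}, // ReadRequestCostFunction
                {5.0,   0.0}, // WriteRequestCostFunction
                {5.0,   0.0}, // MemStoreSizeCostFunction
                {5.0,   0.0}, // StoreFileCostFunction
            };
            double weightedSum = 0.0, multiplierSum = 0.0;
            for (double[] f : functions) {
                weightedSum += f[0] * f[1];
                multiplierSum += f[0];
            }
            double weightedAverageImbalance = multiplierSum > 0 ? weightedSum / multiplierSum : 0.0;
            double minCostNeedBalance = 1.0; // threshold(1.0) in this test run
            System.out.printf("weighted average imbalance=%.1f, threshold=%.1f, balance needed=%b%n",
                weightedAverageImbalance, minCostNeedBalance,
                weightedAverageImbalance > minCostNeedBalance);
        }
    }

With every imbalance at 0.0, the weighted average is 0.0, which is why each table plan above ends in "skipping load balancing".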
2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
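The log message suggests lowering hbase.master.balancer.stochastic.minCostNeedBalance to get more aggressive balancing. A minimal sketch of doing that programmatically is below; the property name comes verbatim from the message, the value 0.05 is only an example, and in a real deployment the setting would normally live in hbase-site.xml on the master rather than in code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: lower the minCostNeedBalance threshold (1.0 in this test run) so smaller
    // imbalances trigger a balance plan.
    public class LowerMinCostNeedBalance {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }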
2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,886 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
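The BalancerClusterState lines describe how each server name is indexed and mapped onto a host and a rack: with ten single-server hosts and a single rack, server i lands on host i and every server sits on rack 0. The sketch below mirrors that logged mapping (using the server names and indices printed in "Hosts are {...}") purely as an illustration of the data shape; it is not HBase's internal structure.

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Illustration of the server -> host index / rack index mapping shown in the log.
    public class HostRackMappingSketch {
        public static void main(String[] args) {
            String[] servers = {
                "srv100840725", "srv1368917282", "srv1406722316", "srv1843546426",
                "srv288771675", "srv330724065", "srv347336202", "srv39484085",
                "srv736369797", "srv833950315"
            };
            Map<String, Integer> serverToHost = new LinkedHashMap<>();
            Map<String, Integer> serverToRack = new LinkedHashMap<>();
            for (int i = 0; i < servers.length; i++) {
                serverToHost.put(servers[i], i); // one server per host => host index == server index
                serverToRack.put(servers[i], 0); // single rack => every server is on rack 0
            }
            serverToHost.forEach((srv, host) ->
                System.out.println("server " + host + " (" + srv + ") is on host " + host
                    + " and rack " + serverToRack.get(srv)));
        }
    }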
2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
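The repeated "Slop is less than zero, not checking for sloppiness." line means the quick slop-based shortcut is disabled in this run. A rough sketch of what such a check looks like is below; the property name (hbase.regions.slop) and the average*(1+slop) criterion are stated as assumptions for illustration, not as the exact HBase code path.

    // Sketch: skip the sloppiness check when slop < 0, otherwise flag servers whose
    // region count exceeds the cluster average scaled by (1 + slop).
    public class SlopCheckSketch {
        static boolean needsBalanceBySlop(int[] regionsPerServer, float slop) {
            if (slop < 0) {
                System.out.println("Slop is less than zero, not checking for sloppiness.");
                return false;
            }
            double avg = 0;
            for (int r : regionsPerServer) avg += r;
            avg /= regionsPerServer.length;
            double ceiling = Math.ceil(avg * (1 + slop));
            for (int r : regionsPerServer) {
                if (r > ceiling) return true; // at least one server carries too many regions
            }
            return false;
        }

        public static void main(String[] args) {
            int[] regions = {10, 10, 10, 10, 10, 10, 10, 10, 10, 10};
            System.out.println(needsBalanceBySlop(regions, -1.0f)); // mirrors this test: slop < 0
        }
    }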
2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,887 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
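The other suggestion in the skip message is to increase the relative multiplier of a specific cost function. A sketch of that follows; the property names are assumptions chosen to match the multipliers printed in functionCost (regionCountCost=500, tableSkewCost=35) and should be verified against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: raise the weight of selected cost functions instead of lowering the threshold.
    public class RaiseCostMultipliers {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f); // logged default: 500
            conf.setFloat("hbase.master.balancer.stochastic.tableSkewCost", 70f);     // logged default: 35
            System.out.println(conf.get("hbase.master.balancer.stochastic.regionCountCost"));
        }
    }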
2024-11-11T04:23:30,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
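Each "Table specific (tableNNN)" block above is a separate balance plan, i.e. the balancer is running in per-table mode. That mode is usually toggled with hbase.master.loadbalance.bytable; the property name is given here as an assumption to verify for your HBase version, and the sketch only shows how it would be set.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: enable per-table balancing so each table gets its own plan, as in this log.
    public class PerTableBalancingSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setBoolean("hbase.master.loadbalance.bytable", true);
            System.out.println(conf.getBoolean("hbase.master.loadbalance.bytable", false));
        }
    }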
2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,889 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
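The StochasticLoadBalancer entries above decide to skip each table because the weighted average imbalance (0.0) does not exceed the configured minCostNeedBalance threshold (1.0). As a hedged illustration of that arithmetic only, not the balancer's actual implementation, the sketch below recomputes the weighted average from the multipliers printed in functionCost (the "(not needed)" functions are left out); the class and method names are hypothetical.

// Illustrative sketch of the comparison in the log line
// "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)".
// Hypothetical names; not HBase internals.
public final class WeightedImbalanceSketch {

    // Multiplier-weighted mean of the per-cost-function imbalances: sum(m_i * x_i) / sum(m_i).
    static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
        double weighted = 0.0;
        double total = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weighted += multipliers[i] * imbalances[i];
            total += multipliers[i];
        }
        return total == 0.0 ? 0.0 : weighted / total;
    }

    public static void main(String[] args) {
        // Multipliers exactly as printed in functionCost for the active cost functions:
        // RegionCountSkew=500, Move=7, RackLocality=15, TableSkew=35,
        // ReadRequest=5, WriteRequest=5, MemStoreSize=5, StoreFile=5.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        // Every imbalance in the log is 0.0, so the weighted average is 0.0.
        double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};

        double avg = weightedAverageImbalance(multipliers, imbalances);
        double minCostNeedBalance = 1.0; // threshold reported by the log

        // 0.0 <= 1.0 holds, so no balance plan is generated for the table.
        System.out.println(avg <= minCostNeedBalance ? "skip balancing" : "generate plan");
    }
}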
2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,891 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
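Each block above also prints the BalancerClusterState bookkeeping: a host-name-to-index map ({srv100840725=0, ..., srv833950315=9}), a single-rack map ({rack=0}), and then one "server N is on host N" / "server N is on rack 0" line per server. The following is a minimal, hypothetical sketch of that indexing, assuming one region server per host and a single rack as in this test; it is not the BalancerClusterState implementation.

import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical sketch of the host/rack indexing printed above: each distinct host name
// gets the next free index, and with only one rack every server maps to rack 0.
public final class ClusterIndexSketch {
    public static void main(String[] args) {
        // Server names taken from the log, ordered by the indices shown in the host map.
        String[] servers = {
            "srv100840725", "srv1368917282", "srv1406722316", "srv1843546426",
            "srv288771675", "srv330724065", "srv347336202", "srv39484085",
            "srv736369797", "srv833950315"
        };

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        int[] serverToHost = new int[servers.length];
        int[] serverToRack = new int[servers.length];

        for (int s = 0; s < servers.length; s++) {
            Integer host = hostIndex.get(servers[s]);
            if (host == null) {                 // first time this host is seen
                host = hostIndex.size();        // assign the next free index
                hostIndex.put(servers[s], host);
            }
            serverToHost[s] = host;             // one server per host => "server N is on host N"
            serverToRack[s] = 0;                // single rack => "server N is on rack 0"
        }

        System.out.println("Hosts are " + hostIndex + " racks are {rack=0}");
        for (int s = 0; s < servers.length; s++) {
            System.out.println("server " + s + " is on host " + serverToHost[s]
                + " and rack " + serverToRack[s]);
        }
    }
}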
2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,893 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
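The recurring "Slop is less than zero, not checking for sloppiness" line means the test sets a negative slop, so the quick region-count tolerance check is skipped and the decision falls through to the cost-based comparison above. The sketch below illustrates the usual meaning of a slop-style check (a server is flagged when its region count leaves the band average*(1±slop)); it is an assumption about the concept, not a copy of BaseLoadBalancer's code, and the names are hypothetical.

// Hedged sketch of a slop-style tolerance check. With slop < 0 the check is skipped,
// which is what "Slop is less than zero, not checking for sloppiness" reports.
// Hypothetical names; not the BaseLoadBalancer implementation.
public final class SlopCheckSketch {

    static boolean sloppyServerExists(int[] regionsPerServer, float slop) {
        if (slop < 0) {
            return false; // negative slop: skip the check entirely, as in this test
        }
        double avg = 0.0;
        for (int r : regionsPerServer) {
            avg += r;
        }
        avg /= regionsPerServer.length;

        double floor = Math.floor(avg * (1 - slop));
        double ceiling = Math.ceil(avg * (1 + slop));
        for (int r : regionsPerServer) {
            if (r < floor || r > ceiling) {
                return true; // this server's load is outside the tolerated band
            }
        }
        return false;
    }

    public static void main(String[] args) {
        int[] even = {12, 12, 12, 12, 12, 12, 12, 12, 12, 12};
        System.out.println(sloppyServerExists(even, -1.0f)); // false: check disabled
        System.out.println(sloppyServerExists(even, 0.2f));  // false: perfectly even load
    }
}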
2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,895 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
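[Editor's note] The BalancerClusterState lines above enumerate each server's host index and rack index before a plan is attempted (ten servers, ten hosts, one rack). The following is a rough, hypothetical sketch of that kind of indexing, written only to make the "server N is on host N / rack 0" output readable; it is not HBase's BalancerClusterState code, and the class and method names are invented for illustration.

```java
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Hypothetical sketch: assign every distinct host and rack an index,
// mirroring the "server N is on host N / server N is on rack 0" log lines.
public class ClusterIndexSketch {
  public static void main(String[] args) {
    // The ten test servers, in the index order shown in the "Hosts are {...}" entries.
    // Each server is its own host; all of them share the single rack "rack".
    List<String> servers = List.of(
        "srv100840725", "srv1368917282", "srv1406722316", "srv1843546426",
        "srv288771675", "srv330724065", "srv347336202", "srv39484085",
        "srv736369797", "srv833950315");

    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    Map<String, Integer> rackIndex = new LinkedHashMap<>();
    for (String server : servers) {
      // size() before insertion yields 0, 1, 2, ... as indices.
      int host = hostIndex.computeIfAbsent(server, h -> hostIndex.size());
      int rack = rackIndex.computeIfAbsent("rack", r -> rackIndex.size());
      System.out.printf("server %d is on host %d, rack %d%n", host, host, rack);
    }
    System.out.printf("number of hosts=%d, number of racks=%d%n",
        hostIndex.size(), rackIndex.size());
  }
}
```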
2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
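[Editor's note] Each block above also logs "Slop is less than zero, not checking for sloppiness." before the cost evaluation. The snippet below is a heavily hedged sketch of what a slop/sloppiness check generally looks like (skip the check when slop is negative, otherwise flag servers whose region count strays outside average*(1 +/- slop)); it reflects the general idea only and is an assumption, not BaseLoadBalancer's actual implementation.

```java
// Rough sketch of a "sloppiness" check in the spirit of the log line above.
// Assumption: a cluster is sloppy when some server's region count falls outside
// average*(1 - slop) .. average*(1 + slop); a negative slop disables the check.
public class SlopSketch {
  static boolean isSloppy(int[] regionsPerServer, float slop) {
    if (slop < 0) {
      return false; // "Slop is less than zero, not checking for sloppiness."
    }
    double total = 0;
    for (int r : regionsPerServer) {
      total += r;
    }
    double average = total / regionsPerServer.length;
    double floor = Math.floor(average * (1 - slop));
    double ceiling = Math.ceil(average * (1 + slop));
    for (int r : regionsPerServer) {
      if (r < floor || r > ceiling) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    int[] evenCluster = {12, 12, 12, 12, 12, 12, 12, 12, 12, 12};
    System.out.println(isSloppy(evenCluster, -1.0f)); // false: check is skipped
    System.out.println(isSloppy(evenCluster, 0.2f));  // false: perfectly even load
  }
}
```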
2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:30,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,897 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-11T04:23:30,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
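[Editor's note] Every skip message above compares a "weighted average imbalance" of 0.0 against the threshold of 1.0 and then lists each cost function's multiplier and imbalance. Below is a minimal sketch of that comparison using the multipliers printed in the functionCost list; the exact formula (multiplier-weighted average of the imbalances of the "needed" cost functions) is an assumption inferred from the log text, not the StochasticLoadBalancer source.

```java
// Minimal sketch, assuming weighted average imbalance =
// sum(multiplier * imbalance) / sum(multiplier) over the needed cost functions.
public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    // Multipliers as printed in the functionCost list; every imbalance is 0.0 in this run.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};
    double minCostNeedBalance = 1.0; // "threshold(1.0)" from the log

    double weightedSum = 0, multiplierSum = 0;
    for (int i = 0; i < multipliers.length; i++) {
      weightedSum += multipliers[i] * imbalances[i];
      multiplierSum += multipliers[i];
    }
    double weightedAverageImbalance =
        multiplierSum == 0 ? 0 : weightedSum / multiplierSum;

    if (weightedAverageImbalance <= minCostNeedBalance) {
      System.out.printf(
          "skipping load balancing: weighted average imbalance=%.1f <= threshold(%.1f)%n",
          weightedAverageImbalance, minCostNeedBalance);
    }
  }
}
```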
2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,898 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
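[Editor's note] The skip messages advise either lowering hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or raising the relative multipliers of specific cost functions. The sketch below shows one hedged way to apply the first suggestion through a Hadoop Configuration before the balancer is created: only the minCostNeedBalance property name is taken verbatim from the log, and the 0.05 value is an arbitrary illustrative choice, not a recommended setting.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch: lower the skip threshold mentioned in the log so the
// stochastic balancer is more willing to attempt a plan.
public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Property name copied from the log message; 0.05f is an illustrative value.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Each cost function also exposes its own multiplier property; raising one
    // of those relative to the others is the alternative the log suggests
    // (the per-function property keys are not shown in this log excerpt).
    System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}
```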
2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,901 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
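The recurring "skipping load balancing" entries above show the stochastic balancer comparing a weighted average of per-cost-function imbalances (RegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, and so on) against the minCostNeedBalance threshold of 1.0. The following is a minimal illustrative sketch of that weighted-average check, not HBase's actual implementation; the class and method names are hypothetical, while the multipliers and the 1.0 threshold are copied from the log lines above.

// Illustrative sketch only: approximates the check reported in the log
// ("weighted average imbalance=0.0 <= threshold(1.0)"). Class and method
// names are hypothetical; multipliers mirror the functionCost output above.
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceSketch {

    // Weighted average: sum(multiplier * imbalance) / sum(multiplier).
    static double weightedAverageImbalance(Map<String, double[]> costs) {
        double weighted = 0.0;
        double totalMultiplier = 0.0;
        for (double[] c : costs.values()) {
            weighted += c[0] * c[1];      // multiplier * imbalance
            totalMultiplier += c[0];      // multiplier
        }
        return totalMultiplier == 0.0 ? 0.0 : weighted / totalMultiplier;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances as printed for each table in the log.
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costs.put("MoveCostFunction",            new double[] {7.0,   0.0});
        costs.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        costs.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        costs.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        costs.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        costs.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        costs.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        double minCostNeedBalance = 1.0; // threshold(1.0) from the log
        double avg = weightedAverageImbalance(costs);
        if (avg <= minCostNeedBalance) {
            System.out.println("skipping load balancing: weighted average imbalance="
                + avg + " <= threshold(" + minCostNeedBalance + ")");
        }
    }
}

Because every imbalance printed above is 0.0, the weighted average is 0.0 regardless of the multipliers, so the balancer skips plan generation for each table.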
2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,903 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,905 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
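Each of the skipped tables points at the same tuning knob named in the log message, hbase.master.balancer.stochastic.minCostNeedBalance. Below is a minimal sketch of lowering it programmatically, assuming the standard HBase/Hadoop Configuration API; in a real deployment the property would normally go into hbase-site.xml, and the 0.05 value is only an illustrative choice, not a recommendation drawn from this log.

// Minimal sketch: lowering the threshold named in the log message.
// Assumes the standard HBase/Hadoop configuration API; 0.05 is illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower the threshold so smaller imbalances still trigger a balance plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        System.out.println("minCostNeedBalance="
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}

The alternative the log suggests, raising the relative multiplier of a specific cost function, works the same way: a higher multiplier gives that function more weight in the average computed above.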
2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,906 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
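The skip messages above all point at the hbase.master.balancer.stochastic.minCostNeedBalance threshold, reported here as 1.0. Below is a minimal sketch of lowering that threshold through the HBase configuration; only the property name is taken from the log message, while the class, value, and surrounding code are illustrative and not part of this test.

```java
// Minimal sketch (illustrative, not from this test run): lower
// hbase.master.balancer.stochastic.minCostNeedBalance so the StochasticLoadBalancer
// stops skipping plan generation when the weighted average imbalance is small.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The threshold printed in this log is 1.0; anything at or below the reported
    // weighted average imbalance keeps causing the "skipping load balancing" message.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}
```

The same effect could instead come from raising the multipliers of specific cost functions, as the log message itself suggests; the exact property names for those multipliers are not printed here, so they are left out of the sketch.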
2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,908 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
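Each skip message also prints a multiplier and an imbalance for every cost function. The sketch below shows one way the reported "weighted average imbalance" could be computed, assuming it is the multiplier-weighted mean over the functions not marked "(not needed)"; the class and method names are illustrative and do not come from the HBase internals.

```java
// Minimal sketch, under the assumption that the "weighted average imbalance" in the
// skip message is the multiplier-weighted mean of the per-cost-function imbalances.
public class WeightedImbalanceSketch {
  static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
    double weighted = 0.0;
    double totalWeight = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      totalWeight += multipliers[i];
    }
    return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
  }

  public static void main(String[] args) {
    // Multipliers and imbalances as printed above: every imbalance is 0.0, so the
    // weighted average is 0.0 <= threshold(1.0) and balancing is skipped.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
    double avg = weightedAverageImbalance(multipliers, imbalances);
    System.out.println("weighted average imbalance = " + avg
        + ", needs balancing = " + (avg > 1.0));
  }
}
```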
2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,910 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
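The BalancerClusterState lines map each server name to a host index and place every server on rack 0. The sketch below reproduces that single-rack, one-server-per-host layout using the server names and indexes printed above; the arrays, map, and class name are hypothetical and not the real BalancerClusterState fields.

```java
// Minimal sketch, assuming the "server i is on host i / rack 0" lines describe index
// arrays mapping each server slot to a host slot and a rack slot.
import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterIndexSketch {
  public static void main(String[] args) {
    // Ten single-server hosts in one rack, mirroring the {rack=0} layout in the log.
    String[] servers = {"srv100840725", "srv1368917282", "srv1406722316", "srv1843546426",
                        "srv288771675", "srv330724065", "srv347336202", "srv39484085",
                        "srv736369797", "srv833950315"};
    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    int[] serverToHost = new int[servers.length];
    int[] serverToRack = new int[servers.length];
    for (int i = 0; i < servers.length; i++) {
      hostIndex.put(servers[i], i);
      serverToHost[i] = i;   // one server per host in this test
      serverToRack[i] = 0;   // single rack, so every server maps to rack 0
      System.out.println("server " + i + " is on host " + serverToHost[i]
          + " and rack " + serverToRack[i]);
    }
    System.out.println("Number of hosts=" + hostIndex.size() + ", number of racks=1");
  }
}
```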
2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,912 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-11T04:23:30,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
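The repeated BalancerClusterState lines above encode a very simple topology: each of the 10 servers is assigned its own host index (0..9) and every server sits in the single rack "rack", which is why the host- and rack-related cost functions have nothing left to optimize. The sketch below is illustrative only (the class name and data structures are assumptions, not HBase internals); it just reproduces the mapping printed in the "Hosts are {...} racks are {rack=0}" records.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch only: the host/rack indexing implied by the log records above.
public class ClusterStateSketch {
  public static void main(String[] args) {
    // Host indices exactly as printed in "Hosts are {...}" (index 0..9).
    String[] servers = {
      "srv100840725", "srv1368917282", "srv1406722316", "srv1843546426",
      "srv288771675", "srv330724065", "srv347336202", "srv39484085",
      "srv736369797", "srv833950315"
    };
    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    Map<String, Integer> rackIndex = new LinkedHashMap<>();
    for (int i = 0; i < servers.length; i++) {
      hostIndex.put(servers[i], i); // "server i is on host i"
      rackIndex.put(servers[i], 0); // "server i is on rack 0" - one shared rack
    }
    System.out.println("Number of hosts=" + hostIndex.size()
        + ", number of racks=" + rackIndex.values().stream().distinct().count());
  }
}
```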
2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,914 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
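Each skip decision above compares a "weighted average imbalance" of 0.0 against the threshold of 1.0 (hbase.master.balancer.stochastic.minCostNeedBalance). One plausible reading of that figure, taken directly from the logged functionCost string, is a multiplier-weighted mean of the per-function imbalances, with the functions reported as "(not needed)" excluded. The sketch below reproduces the 0.0 result from the multipliers printed in the log; the class name and the exact aggregation are assumptions, not the StochasticLoadBalancer source.

```java
// Illustrative sketch only: reproducing "weighted average imbalance=0.0 <= threshold(1.0)"
// from the (multiplier, imbalance) pairs printed in the functionCost string above.
public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    double[][] multiplierAndImbalance = {
      {500.0, 0.0}, // RegionCountSkewCostFunction
      {7.0, 0.0},   // MoveCostFunction
      {15.0, 0.0},  // RackLocalityCostFunction
      {35.0, 0.0},  // TableSkewCostFunction
      {5.0, 0.0},   // ReadRequestCostFunction
      {5.0, 0.0},   // WriteRequestCostFunction
      {5.0, 0.0},   // MemStoreSizeCostFunction
      {5.0, 0.0}    // StoreFileCostFunction
    };
    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (double[] entry : multiplierAndImbalance) {
      weightedSum += entry[0] * entry[1]; // multiplier * imbalance
      multiplierSum += entry[0];
    }
    double weightedAverage = multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
    double minCostNeedBalance = 1.0; // threshold reported in the log
    boolean needsBalancing = weightedAverage > minCostNeedBalance;
    System.out.printf("weighted average imbalance=%.1f, threshold=%.1f, balance=%b%n",
        weightedAverage, minCostNeedBalance, needsBalancing);
  }
}
```

With every imbalance at 0.0 the weighted average is 0.0, so the balancer skips plan generation for each table, exactly as the INFO records report.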
2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,915 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,917 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-11T04:23:30,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
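Note on the repeated BalancerClusterState lines above ("Hosts are {...} racks are {rack=0}", "server N is on host N", "server N is on rack 0"): they show the balancer flattening the cluster topology into integer indices before any cost is evaluated. The sketch below is a simplified, hypothetical illustration of that indexing idea in plain Java; the names (TopologyIndex, addServer) are illustrative and are not HBase APIs.

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical illustration of the host/rack indexing the log lines describe:
// each server name is mapped to a host index and each host to a rack index,
// so cost functions can operate on small int arrays instead of strings.
public class TopologyIndex {
    private final Map<String, Integer> hostIndex = new HashMap<>();
    private final Map<String, Integer> rackIndex = new HashMap<>();
    private final List<Integer> serverToHost = new ArrayList<>();
    private final List<Integer> serverToRack = new ArrayList<>();

    public void addServer(String server, String host, String rack) {
        int h = hostIndex.computeIfAbsent(host, k -> hostIndex.size());
        int r = rackIndex.computeIfAbsent(rack, k -> rackIndex.size());
        serverToHost.add(h);   // corresponds to "server i is on host h"
        serverToRack.add(r);   // corresponds to "server i is on rack r"
    }

    public int numHosts() { return hostIndex.size(); }
    public int numRacks() { return rackIndex.size(); }

    public static void main(String[] args) {
        TopologyIndex idx = new TopologyIndex();
        // Ten single-server hosts on one rack, as in the test cluster above.
        for (int i = 0; i < 10; i++) {
            idx.addServer("srv" + i, "host" + i, "rack");
        }
        // Matches "number of hosts=10, number of racks=1" in the log.
        System.out.println(idx.numHosts() + " hosts, " + idx.numRacks() + " rack(s)");
    }
}
```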
2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,919 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-11T04:23:30,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
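Each skip message above reports a "weighted average imbalance" compared against the minCostNeedBalance threshold, with the per-function multipliers and imbalances listed under functionCost. A minimal sketch of that decision rule, under the assumption that it is a multiplier-weighted mean of the individual imbalances; the class and method names are illustrative, not the actual StochasticLoadBalancer internals.

```java
// Illustrative only: recomputes the "weighted average imbalance" that the log
// compares against hbase.master.balancer.stochastic.minCostNeedBalance.
public class WeightedImbalance {
    static double weightedAverage(double[] multipliers, double[] imbalances) {
        double weighted = 0.0;
        double total = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weighted += multipliers[i] * imbalances[i];
            total += multipliers[i];
        }
        return total == 0.0 ? 0.0 : weighted / total;
    }

    public static void main(String[] args) {
        // Multipliers taken from the functionCost dump above; in this test run
        // every reported imbalance is 0.0, so the weighted average is 0.0.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
        double avg = weightedAverage(multipliers, imbalances);
        double minCostNeedBalance = 1.0; // "threshold(1.0)" in this test run
        if (avg <= minCostNeedBalance) {
            System.out.println("skipping load balancing: " + avg + " <= " + minCostNeedBalance);
        }
    }
}
```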
2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,920 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
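The message itself names two knobs for more aggressive balancing: lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising the multiplier of a specific cost function. A hedged sketch of setting those in code follows; the minCostNeedBalance key is copied verbatim from the log, while hbase.master.balancer.stochastic.regionCountCost is assumed to be the multiplier key for RegionCountSkewCostFunction (default 500.0, per the dump above) and should be verified against the HBase version in use. In practice these properties belong in hbase-site.xml on the master rather than in client code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the tuning hinted at in the log message above.
public class BalancerTuning {
    public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Key copied from the log line; lower it so the balancer acts on smaller imbalances.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Assumed multiplier key for RegionCountSkewCostFunction (verify for your version).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        return conf;
    }
}
```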
2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,922 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
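The skip decisions above all hinge on the threshold named in the message: with every cost function reporting imbalance=0.0, the weighted average stays at 0.0 and never exceeds the configured minCostNeedBalance of 1.0, so no balance plan is generated for any table. A minimal sketch of how those knobs could be tuned through the Hadoop Configuration API follows; the minCostNeedBalance key is quoted verbatim from the log, while the class name and the regionCountCost multiplier key are assumptions that should be checked against the HBase version under test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: tune the balancer knobs named in the log output above.
// "hbase.master.balancer.stochastic.minCostNeedBalance" is quoted from the
// log; "hbase.master.balancer.stochastic.regionCountCost" is assumed to be
// the multiplier key behind RegionCountSkewCostFunction (multiplier=500.0
// above) and should be verified for the HBase version in use.
public class BalancerTuningSketch {
  public static Configuration tunedBalancerConf() {
    Configuration conf = HBaseConfiguration.create();
    // Lower the skip threshold so smaller imbalances still produce a plan.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Or weight region-count skew even more heavily than the 500.0 seen above.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);
    return conf;
  }
}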
2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,924 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-11T04:23:30,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,926 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
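The recurring DEBUG line from BaseLoadBalancer(253) notes that the sloppiness shortcut is disabled in this run because the slop value is negative. As a rough illustration (an assumed shape, not BaseLoadBalancer's actual code; hbase.regions.slop is the standard setting that feeds this value), such a shortcut would flag a server whose region count exceeds the cluster average scaled by (1 + slop), and a negative slop skips the check entirely:

import java.util.Arrays;

// Rough illustration of the sloppiness shortcut mentioned in the DEBUG line
// above; this is an assumed shape, not BaseLoadBalancer's actual code.
public class SlopCheckSketch {
  // slop would normally come from the "hbase.regions.slop" setting.
  static boolean sloppyRegionServerExists(int[] regionsPerServer, float slop) {
    if (slop < 0) {
      // Matches "Slop is less than zero, not checking for sloppiness."
      return false;
    }
    double average = Arrays.stream(regionsPerServer).average().orElse(0.0);
    double ceiling = Math.ceil(average * (1 + slop));
    for (int regions : regionsPerServer) {
      if (regions > ceiling) {
        return true; // at least one server carries a sloppy region count
      }
    }
    return false;
  }
}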
2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,928 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-11T04:23:30,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-11T04:23:30,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv833950315=9, srv1406722316=2, srv1843546426=3, srv330724065=5, srv1368917282=1, srv736369797=8, srv347336202=6, srv288771675=4, srv100840725=0, srv39484085=7} racks are {rack=0} 2024-11-11T04:23:30,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:30,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:30,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-11T04:23:30,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv873389993=6, srv1766244197=4, srv1186124631=1, srv1325616660=2, srv294479504=5, srv960547561=7, srv1158814137=0, srv1591027619=3} racks are {rack=0} 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
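Aside: the skip decisions repeated throughout this run all follow the same pattern. Each cost function reports an imbalance, the balancer weights those imbalances by the configured multipliers, and if the weighted average stays at or below minCostNeedBalance (1.0 here) no plan is generated. The sketch below is a minimal standalone illustration of that arithmetic using the multipliers and imbalances from the functionCost= listing above; it is not the actual StochasticLoadBalancer code, and the exact aggregation in HBase may differ.

import java.util.LinkedHashMap;
import java.util.Map;

/**
 * Minimal illustration (not the real HBase implementation) of a
 * multiplier-weighted imbalance check like the one the log reports:
 * "weighted average imbalance=0.0 <= threshold(1.0)" => skip balancing.
 */
public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        // cost function -> {multiplier, imbalance}, taken from the functionCost= line above;
        // functions reported as "(not needed)" are simply omitted here.
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costs.put("MoveCostFunction",            new double[] {7.0,   0.0});
        costs.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        costs.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        costs.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        costs.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        costs.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        costs.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] mi : costs.values()) {
            weightedSum += mi[0] * mi[1];   // multiplier * imbalance
            multiplierSum += mi[0];
        }
        double weightedAverage = multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;

        double minCostNeedBalance = 1.0;    // hbase.master.balancer.stochastic.minCostNeedBalance
        if (weightedAverage <= minCostNeedBalance) {
            System.out.println("weighted average imbalance=" + weightedAverage
                + " <= threshold(" + minCostNeedBalance + "): skip balancing");
        } else {
            System.out.println("imbalance above threshold: generate a balance plan");
        }
    }
}

With every imbalance at 0.0 the weighted average is 0.0, which is at or below the 1.0 threshold, so every table in this run is skipped.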
2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv873389993=6, srv1766244197=4, srv1186124631=1, srv1325616660=2, srv294479504=5, srv960547561=7, srv1158814137=0, srv1591027619=3} racks are {rack=0} 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv873389993=6, srv1766244197=4, srv1186124631=1, srv1325616660=2, srv294479504=5, srv960547561=7, srv1158814137=0, srv1591027619=3} racks are {rack=0} 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv873389993=6, srv1766244197=4, srv1186124631=1, srv1325616660=2, srv294479504=5, srv960547561=7, srv1158814137=0, srv1591027619=3} racks are {rack=0} 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv873389993=6, srv1766244197=4, srv1186124631=1, srv1325616660=2, srv294479504=5, srv960547561=7, srv1158814137=0, srv1591027619=3} racks are {rack=0} 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv873389993=6, srv1766244197=4, srv1186124631=1, srv1325616660=2, srv294479504=5, srv960547561=7, srv1158814137=0, srv1591027619=3} racks are {rack=0} 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv873389993=6, srv1766244197=4, srv1186124631=1, srv1325616660=2, srv294479504=5, srv960547561=7, srv1158814137=0, srv1591027619=3} racks are {rack=0} 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv873389993=6, srv1766244197=4, srv1186124631=1, srv1325616660=2, srv294479504=5, srv960547561=7, srv1158814137=0, srv1591027619=3} racks are {rack=0} 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
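Aside: the INFO lines above repeatedly name the two knobs that control this behaviour. A minimal sketch of setting them programmatically follows; hbase.master.balancer.stochastic.minCostNeedBalance and its default of 1.0 come straight from the log message, while the regionCountCost key (assumed here to be the multiplier behind RegionCountSkewCostFunction, whose default of 500 matches the log) should be verified against the HBase version in use. In practice these settings would normally live in hbase-site.xml on the master rather than in code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/**
 * Sketch of the two tuning knobs the log message points at; values here are
 * illustrative only.
 */
public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Option 1: lower the skip threshold (default 1.0 per the log line).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Option 2: weight region-count skew more heavily.
        // Assumed key for RegionCountSkewCostFunction (default 500); check the
        // key name for your HBase release.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}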
2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv873389993=6, srv1766244197=4, srv1186124631=1, srv1325616660=2, srv294479504=5, srv960547561=7, srv1158814137=0, srv1591027619=3} racks are {rack=0} 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv873389993=6, srv1766244197=4, srv1186124631=1, srv1325616660=2, srv294479504=5, srv960547561=7, srv1158814137=0, srv1591027619=3} racks are {rack=0} 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv873389993=6, srv1766244197=4, srv1186124631=1, srv1325616660=2, srv294479504=5, srv960547561=7, srv1158814137=0, srv1591027619=3} racks are {rack=0} 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv873389993=6, srv1766244197=4, srv1186124631=1, srv1325616660=2, srv294479504=5, srv960547561=7, srv1158814137=0, srv1591027619=3} racks are {rack=0} 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv873389993=6, srv1766244197=4, srv1186124631=1, srv1325616660=2, srv294479504=5, srv960547561=7, srv1158814137=0, srv1591027619=3} racks are {rack=0} 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-11T04:23:30,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv873389993=6, srv1766244197=4, srv1186124631=1, srv1325616660=2, srv294479504=5, srv960547561=7, srv1158814137=0, srv1591027619=3} racks are {rack=0} 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv565132769=6, srv1626034648=2, srv1105277041=0, srv2070372305=5, srv1969987131=4, srv947301338=7, srv1458128493=1, srv188521549=3} racks are {rack=0} 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv565132769=6, srv1626034648=2, srv1105277041=0, srv2070372305=5, srv1969987131=4, srv947301338=7, srv1458128493=1, srv188521549=3} racks are {rack=0} 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv565132769=6, srv1626034648=2, srv1105277041=0, srv2070372305=5, srv1969987131=4, srv947301338=7, srv1458128493=1, srv188521549=3} racks are {rack=0} 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:30,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv565132769=6, srv1626034648=2, srv1105277041=0, srv2070372305=5, srv1969987131=4, srv947301338=7, srv1458128493=1, srv188521549=3} racks are {rack=0} 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv565132769=6, srv1626034648=2, srv1105277041=0, srv2070372305=5, srv1969987131=4, srv947301338=7, srv1458128493=1, srv188521549=3} racks are {rack=0} 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv565132769=6, srv1626034648=2, srv1105277041=0, srv2070372305=5, srv1969987131=4, srv947301338=7, srv1458128493=1, srv188521549=3} racks are {rack=0} 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv565132769=6, srv1626034648=2, srv1105277041=0, srv2070372305=5, srv1969987131=4, srv947301338=7, srv1458128493=1, srv188521549=3} racks are {rack=0} 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv565132769=6, srv1626034648=2, srv1105277041=0, srv2070372305=5, srv1969987131=4, srv947301338=7, srv1458128493=1, srv188521549=3} racks are {rack=0} 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
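Each block above ends with "Slop is less than zero, not checking for sloppiness." In this run the slop value (normally taken from hbase.regions.slop) is negative, so the quick per-server region-count pre-check is bypassed and the balancer relies solely on the cost-based decision. The sketch below illustrates what that skipped check roughly does; the floor/ceiling-around-the-average bounds follow the commonly described rule and should be treated as an assumption, not a quote of BaseLoadBalancer.

```java
// Hypothetical sketch of a slop-based "sloppiness" check: a server is considered
// out of balance if its region count falls outside
// [floor(avg * (1 - slop)), ceil(avg * (1 + slop))].
// In the logged test run slop < 0, so this pre-check is skipped entirely.
public final class SlopCheckSketch {
  static boolean sloppyServerExists(int[] regionsPerServer, float slop) {
    if (slop < 0) {
      return false; // mirrors "Slop is less than zero, not checking for sloppiness."
    }
    double total = 0;
    for (int r : regionsPerServer) {
      total += r;
    }
    double avg = total / regionsPerServer.length;
    long floor = (long) Math.floor(avg * (1 - slop));
    long ceiling = (long) Math.ceil(avg * (1 + slop));
    for (int r : regionsPerServer) {
      if (r < floor || r > ceiling) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    int[] regions = {10, 10, 10, 10, 10, 10, 10, 10}; // 8 servers, as in the log
    System.out.println(sloppyServerExists(regions, -1.0f)); // false: check skipped
  }
}
```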
2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv565132769=6, srv1626034648=2, srv1105277041=0, srv2070372305=5, srv1969987131=4, srv947301338=7, srv1458128493=1, srv188521549=3} racks are {rack=0} 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv565132769=6, srv1626034648=2, srv1105277041=0, srv2070372305=5, srv1969987131=4, srv947301338=7, srv1458128493=1, srv188521549=3} racks are {rack=0} 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv565132769=6, srv1626034648=2, srv1105277041=0, srv2070372305=5, srv1969987131=4, srv947301338=7, srv1458128493=1, srv188521549=3} racks are {rack=0} 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv565132769=6, srv1626034648=2, srv1105277041=0, srv2070372305=5, srv1969987131=4, srv947301338=7, srv1458128493=1, srv188521549=3} racks are {rack=0} 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv565132769=6, srv1626034648=2, srv1105277041=0, srv2070372305=5, srv1969987131=4, srv947301338=7, srv1458128493=1, srv188521549=3} racks are {rack=0} 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv565132769=6, srv1626034648=2, srv1105277041=0, srv2070372305=5, srv1969987131=4, srv947301338=7, srv1458128493=1, srv188521549=3} racks are {rack=0} 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-11T04:23:30,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
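The BalancerClusterState DEBUG lines are the cluster being flattened into index space: each server name maps to an integer, each server to a host index, and each host to a rack index; with no rack topology supplied, everything lands in rack 0. A toy sketch of that indexing follows, using the server names from the log; the data structures are illustrative, not the class's actual fields.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Toy version of the host/rack indexing that BalancerClusterState logs:
// every server gets an index, every server is its own host, and with no rack
// topology configured everything falls into a single rack ("rack" -> 0).
public final class ClusterIndexSketch {
  public static void main(String[] args) {
    String[] servers = {"srv1153506678", "srv174535257", "srv351362049", "srv386636658",
        "srv441536239", "srv472158651", "srv84812107", "srv973509489"};
    Map<String, Integer> serverToIndex = new LinkedHashMap<>();
    for (int i = 0; i < servers.length; i++) {
      serverToIndex.put(servers[i], i);
      System.out.println("server " + i + " is on host " + i + ", rack 0");
    }
    System.out.println("Number of tables=1, number of hosts=" + serverToIndex.size()
        + ", number of racks=1");
  }
}
```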
2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-11T04:23:30,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
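The message above spells out the two tuning directions: lower hbase.master.balancer.stochastic.minCostNeedBalance, or raise the multiplier of a specific cost function. A hedged sketch of doing either through the Hadoop Configuration API follows; the multiplier key hbase.master.balancer.stochastic.regionCountCost is assumed to correspond to RegionCountSkewCostFunction and should be verified against the HBase release under test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the two tuning directions suggested by the balancer log message.
// Only minCostNeedBalance is printed verbatim in the log; the multiplier key
// below is an assumption and should be checked against the HBase version used.
public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Option 1: lower the skip threshold so smaller imbalances still trigger a plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Option 2: weight one cost function more heavily (assumed key for
        // RegionCountSkewCostFunction; its multiplier defaults to 500 in this log).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
```

Either change would make the balancer more willing to emit a plan for tables like the ones above, at the cost of more region movement.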
2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
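Every block also ends with BaseLoadBalancer reporting "Slop is less than zero, not checking for sloppiness", i.e. the per-server region-count slop check is disabled in this test. If that is intentional, it is presumably driven by a negative hbase.regions.slop value; the snippet below is only an assumed way of reproducing that setting, not something confirmed by this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Assumed configuration behind the repeated
// "Slop is less than zero, not checking for sloppiness" line.
public class SlopSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // A negative slop value makes the balancer skip the sloppiness check entirely.
        conf.setFloat("hbase.regions.slop", -1.0f);
        System.out.println("hbase.regions.slop=" + conf.get("hbase.regions.slop"));
    }
}
```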
2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-11T04:23:30,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
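Each skip message points at the same knob, hbase.master.balancer.stochastic.minCostNeedBalance (default 1.0 per the log). A minimal sketch of lowering it in a Configuration is below; the surrounding harness (how the balancer picks up this Configuration) is assumed rather than shown, and 0.05 is an arbitrary illustrative value.

// Hedged sketch: lowering the threshold the log names, using the standard
// HBase/Hadoop Configuration API. The value 0.05f is only an example.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Default is 1.0 per the log; a lower value makes the balancer act on
        // smaller weighted-average imbalances instead of skipping the table.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        System.out.println(
            conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}

On a running cluster the same property would normally be set in hbase-site.xml on the master rather than in code; the log's other suggestion, raising individual cost-function multipliers, works the same way through their respective configuration properties.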
2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-11T04:23:30,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-11T04:23:30,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-11T04:23:30,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:30,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
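Each block also ends with BaseLoadBalancer logging "Slop is less than zero, not checking for sloppiness", i.e. the per-server region-count slop check is disabled in this test. Assuming the value comes from the hbase.regions.slop setting (the property name is not shown in the log), a negative slop is what reproduces that DEBUG line:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SlopSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // A negative value disables the sloppiness check, matching the DEBUG lines above.
        // The property name is an assumption; it is not printed in the log itself.
        conf.setFloat("hbase.regions.slop", -1.0f);
        System.out.println("hbase.regions.slop = " + conf.get("hbase.regions.slop"));
      }
    }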
2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-11T04:23:30,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-11T04:23:30,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-11T04:23:30,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-11T04:23:30,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-11T04:23:30,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
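The BalancerClusterState lines enumerate how each of the eight servers is mapped to a host index and a rack index before plan generation; since every server here runs on its own host and only the single default rack exists, each server maps to a distinct host and to rack 0. A small sketch of that indexing step is shown below, assuming plain string keys; the map-building code is illustrative and not the HBase implementation.

```java
// Sketch of the host/rack index assignment that BalancerClusterState reports.
// The maps mirror the "Hosts are {...} racks are {rack=0}" lines; names are illustrative.
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ClusterIndexSketch {
    public static void main(String[] args) {
        List<String> servers = List.of(
            "srv1153506678", "srv174535257", "srv351362049", "srv386636658",
            "srv441536239", "srv472158651", "srv84812107", "srv973509489");

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();
        rackIndex.put("rack", 0); // single default rack, as in the log

        for (int serverId = 0; serverId < servers.size(); serverId++) {
            // Each server runs on its own host here, so host index == server index.
            hostIndex.putIfAbsent(servers.get(serverId), hostIndex.size());
            System.out.printf("server %d is on host %d%n",
                serverId, hostIndex.get(servers.get(serverId)));
        }
        for (int serverId = 0; serverId < servers.size(); serverId++) {
            System.out.printf("server %d is on rack %d%n", serverId, rackIndex.get("rack"));
        }
        System.out.printf("Number of tables=1, number of hosts=%d, number of racks=%d%n",
            hostIndex.size(), rackIndex.size());
    }
}
```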
2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-11T04:23:30,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-11T04:23:30,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv174535257=1, srv1153506678=0, srv351362049=2, srv441536239=4, srv472158651=5, srv84812107=6, srv386636658=3, srv973509489=7} racks are {rack=0} 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-11T04:23:30,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-11T04:23:30,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-11T04:23:30,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
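Each plan above also ends with "Slop is less than zero, not checking for sloppiness" from BaseLoadBalancer. Sloppiness is normally a bound check of per-server region counts around the cluster average (controlled by hbase.regions.slop), and a negative slop disables it. The following is a minimal, hypothetical sketch of such a check, not a copy of BaseLoadBalancer's code; the method name and the average-based bounds are assumptions for illustration.

    public class SlopCheckSketch {
      // Hypothetical slop check: with slop < 0 the check is skipped entirely,
      // otherwise a server is treated as "sloppy" when its region count falls
      // outside average * (1 - slop) .. average * (1 + slop).
      static boolean sloppyRegionCounts(int[] regionsPerServer, float slop) {
        if (slop < 0) {
          return false;                            // "not checking for sloppiness"
        }
        double total = 0;
        for (int regions : regionsPerServer) {
          total += regions;
        }
        double average = total / regionsPerServer.length;
        double floor = Math.floor(average * (1 - slop));
        double ceiling = Math.ceil(average * (1 + slop));
        for (int regions : regionsPerServer) {
          if (regions < floor || regions > ceiling) {
            return true;
          }
        }
        return false;
      }

      public static void main(String[] args) {
        int[] regionsPerServer = {12, 12, 13, 11, 12, 12, 13, 11};        // 8 servers, as in the log
        System.out.println(sloppyRegionCounts(regionsPerServer, -1.0f));  // false: check disabled
      }
    }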
2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-11T04:23:30,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:30,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:30,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-11T04:23:31,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-11T04:23:31,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:31,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-11T04:23:31,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-11T04:23:31,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-11T04:23:31,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-11T04:23:31,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-11T04:23:31,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-11T04:23:31,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1032361270=0, srv1571242441=2, srv43583022=6, srv1194980059=1, srv1989500781=4, srv401366684=5, srv1783036206=3, srv546496439=7} racks are {rack=0} 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:31,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv703111057=2, srv997162620=4, srv2032771770=1, srv81402673=3, srv1427888113=0} racks are {rack=0} 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv703111057=2, srv997162620=4, srv2032771770=1, srv81402673=3, srv1427888113=0} racks are {rack=0} 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv703111057=2, srv997162620=4, srv2032771770=1, srv81402673=3, srv1427888113=0} racks are {rack=0} 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv703111057=2, srv997162620=4, srv2032771770=1, srv81402673=3, srv1427888113=0} racks are {rack=0} 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:31,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv703111057=2, srv997162620=4, srv2032771770=1, srv81402673=3, srv1427888113=0} racks are {rack=0} 2024-11-11T04:23:31,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-11T04:23:31,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-11T04:23:31,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,064 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,067 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,071 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,074 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
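The skip message above names hbase.master.balancer.stochastic.minCostNeedBalance as the threshold to lower (or a cost-function multiplier to raise) if more aggressive balancing is wanted. A minimal sketch of how such settings could be applied through the Hadoop/HBase Configuration API follows; the regionCountCost key and the concrete values are illustrative assumptions taken from common HBase convention, not from this log, so verify them against the HBase version under test. In a live cluster these properties would normally be set in hbase-site.xml on the master rather than programmatically.

    // Hypothetical tuning sketch, not part of the test above: make the
    // StochasticLoadBalancer more willing to balance, per the hint in the log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower the "needs balance" threshold from the 1.0 used in this test run,
        // so smaller weighted-average imbalances still trigger a balancing pass.
        // (Property name is taken directly from the log message above.)
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Alternatively, raise the relative weight of region-count skew.
        // Assumed key; the log only shows its multiplier (500.0), not the key name.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
      }
    }
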
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-11T04:23:31,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,075 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,078 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,082 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
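[Editor's note] The StochasticLoadBalancer message just above (its functionCost breakdown continues on the next line) is the decision point repeated for every table in this log: plan generation is skipped because the weighted average imbalance (0.0) does not exceed the threshold taken from hbase.master.balancer.stochastic.minCostNeedBalance (1.0). The sketch below is not HBase source code; it is a minimal, self-contained Java illustration of that comparison, reusing the multipliers and imbalances printed in this log. The class name and the weighted-average formula are assumptions introduced for illustration; the only property name taken from the log itself is hbase.master.balancer.stochastic.minCostNeedBalance.

// Minimal sketch (not HBase source): reproduces the skip/balance decision logged for table14.
// Multipliers and imbalances are copied from the functionCost line that follows; treating the
// "weighted average imbalance" as a multiplier-weighted mean is an assumption for illustration.
public class BalancerDecisionSketch {
    public static void main(String[] args) {
        // {multiplier, imbalance} pairs for the cost functions the log does not mark "(not needed)"
        double[][] costs = {
            {500.0, 0.0}, // RegionCountSkewCostFunction
            {7.0,   0.0}, // MoveCostFunction
            {15.0,  0.0}, // RackLocalityCostFunction
            {35.0,  0.0}, // TableSkewCostFunction
            {5.0,   0.0}, // ReadRequestCostFunction
            {5.0,   0.0}, // WriteRequestCostFunction
            {5.0,   0.0}, // MemStoreSizeCostFunction
            {5.0,   0.0}  // StoreFileCostFunction
        };
        // Threshold reported in the log, i.e. hbase.master.balancer.stochastic.minCostNeedBalance.
        double threshold = 1.0;

        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] c : costs) {
            weightedSum += c[0] * c[1];
            multiplierSum += c[0];
        }
        double weightedAvgImbalance = multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;

        // Matches the logged outcome: 0.0 <= 1.0, so the balance plan for the table is skipped.
        boolean skip = weightedAvgImbalance <= threshold;
        System.out.printf("weighted average imbalance=%.1f, threshold=%.1f, skip balancing=%b%n",
                weightedAvgImbalance, threshold, skip);

        // The log message suggests lowering the threshold for more aggressive balancing, e.g. a
        // standard hbase-site.xml property block (value chosen here only as an example):
        //   <property>
        //     <name>hbase.master.balancer.stochastic.minCostNeedBalance</name>
        //     <value>0.05</value>
        //   </property>
    }
}

The alternative the message mentions, raising the relative multiplier of a specific cost function, is done through the per-cost-function multiplier properties documented in the HBase reference guide for the version in use.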
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,087 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,089 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,093 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
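The functionCost breakdown that follows lists each cost function with its multiplier and current imbalance; the weighted average imbalance quoted above appears to be the multiplier-weighted mean of those imbalances, and the two tuning knobs named in the message are the minCostNeedBalance threshold and the per-function multipliers. A minimal sketch of that check and of setting the quoted configuration keys, assuming the weighted average is sum(multiplier * imbalance) / sum(multiplier) and assuming the regionCountCost property name (only minCostNeedBalance is quoted verbatim in the log):

import org.apache.hadoop.conf.Configuration;

public class BalancerCostSketch {
    // Weighted average imbalance = sum(multiplier_i * imbalance_i) / sum(multiplier_i).
    // With every imbalance at 0.0 (as in the functionCost line below), the result is 0.0,
    // which is <= threshold(1.0), so the balancer skips table15.
    static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
        double weighted = 0.0;
        double total = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weighted += multipliers[i] * imbalances[i];
            total += multipliers[i];
        }
        return total == 0.0 ? 0.0 : weighted / total;
    }

    public static void main(String[] args) {
        // Multipliers as reported below: RegionCountSkew=500, Move=7, RackLocality=15,
        // TableSkew=35, ReadRequest=5, WriteRequest=5, MemStoreSize=5, StoreFile=5.
        double[] multipliers = {500, 7, 15, 35, 5, 5, 5, 5};
        double[] imbalances  = {0, 0, 0, 0, 0, 0, 0, 0};
        System.out.println(weightedAverageImbalance(multipliers, imbalances)); // prints 0.0

        // Tuning knobs named in the log message: lower the threshold, or raise a multiplier.
        Configuration conf = new Configuration();
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f); // 1.0 in this run
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);    // assumed key; default 500 matches the log
    }
}

Lowering minCostNeedBalance makes the balancer act on smaller imbalances, while raising an individual multiplier increases that function's weight in the average; either change would make a run like the one above less likely to be skipped.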
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-11T04:23:31,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,098 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
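[editor's note] The functionCost summary at the top of this balance-plan cycle lists each StochasticLoadBalancer cost function with its multiplier and current imbalance (all 0.0 for table16 here). Below is a minimal, illustrative Java sketch of how such multiplier/imbalance pairs could be folded into one weighted score; the class and method names (WeightedCostSketch, weightedCost) and the normalization assumption are hypothetical and are not the HBase implementation.

import java.util.LinkedHashMap;
import java.util.Map;

// Sketch only: combines (multiplier, imbalance) pairs like the functionCost
// line above into a single weighted total. Not HBase code.
public class WeightedCostSketch {

    // Assumes each imbalance is already normalized to [0, 1].
    static double weightedCost(Map<String, double[]> terms) {
        double weightedSum = 0.0;
        double weightTotal = 0.0;
        for (double[] t : terms.values()) {
            double multiplier = t[0];
            double imbalance = t[1];
            weightedSum += multiplier * imbalance;
            weightTotal += multiplier;
        }
        return weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;
    }

    public static void main(String[] args) {
        // Multipliers copied from the functionCost line for table16; every
        // imbalance there is 0.0, so the combined cost printed is 0.0.
        Map<String, double[]> terms = new LinkedHashMap<>();
        terms.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        terms.put("MoveCostFunction",            new double[] {7.0,   0.0});
        terms.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        terms.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        terms.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        terms.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        terms.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        terms.put("StoreFileCostFunction",       new double[] {5.0,   0.0});
        System.out.println("weighted cost = " + weightedCost(terms));
    }
}

[end editor's note]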
2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,100 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
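[editor's note] The repeated "server N is on host N" and "server N is on rack 0" entries record the topology BalancerClusterState builds for this test: 393 servers, one per host, all on a single rack. The following sketch, offered only as an assumption about how one might post-process such a log, reconstructs those server-to-host and server-to-rack maps with a regex; ClusterStateLogParser and parse are hypothetical names, not HBase APIs.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Sketch only: pulls server -> host and server -> rack assignments out of
// BalancerClusterState log lines like those above. Not HBase code.
public class ClusterStateLogParser {

    private static final Pattern HOST_LINE =
        Pattern.compile("server (\\d+) is on host (\\d+)");
    private static final Pattern RACK_LINE =
        Pattern.compile("server (\\d+) is on rack (\\d+)");

    static Map<Integer, Integer> parse(Iterable<String> logLines, Pattern p) {
        Map<Integer, Integer> mapping = new LinkedHashMap<>();
        for (String line : logLines) {
            Matcher m = p.matcher(line);
            while (m.find()) {
                mapping.put(Integer.parseInt(m.group(1)),
                            Integer.parseInt(m.group(2)));
            }
        }
        return mapping;
    }

    public static void main(String[] args) {
        List<String> sample = List.of(
            "... balancer.BalancerClusterState(303): server 0 is on host 0",
            "... balancer.BalancerClusterState(314): server 0 is on rack 0");
        System.out.println("hosts: " + parse(sample, HOST_LINE));
        System.out.println("racks: " + parse(sample, RACK_LINE));
    }
}

[end editor's note]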
2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,102 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,104 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-11T04:23:31,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
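The skip message above names hbase.master.balancer.stochastic.minCostNeedBalance as the threshold that gates balancing: with weighted average imbalance 0.0 and the default threshold 1.0, the StochasticLoadBalancer does nothing for table16. As a minimal sketch of acting on that hint (assuming the threshold is tuned programmatically through a client-side Configuration rather than in hbase-site.xml, and with 0.05f as a purely illustrative value, not one taken from this log), lowering it could look like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class LowerBalanceThreshold {
        public static void main(String[] args) {
            // Start from the standard HBase configuration (hbase-site.xml on the classpath).
            Configuration conf = HBaseConfiguration.create();
            // The balancer above skips work because imbalance (0.0) <= minCostNeedBalance (1.0).
            // A lower threshold makes the StochasticLoadBalancer act on smaller imbalances;
            // 0.05f is an assumed example value for illustration only.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }

On a running cluster the same property would normally be set in hbase-site.xml on the HMaster, as the log message itself suggests; the snippet only shows where the named threshold lives, not a recommended value.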
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,109 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,111 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,115 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,117 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,118 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-11T04:23:31,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,120 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
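The StochasticLoadBalancer(421) entry above explains the skip decision for table17: the per-cost-function imbalances (each scaled by its multiplier, as listed in the functionCost summary) are combined into a weighted average, and balancing is only attempted when that value exceeds hbase.master.balancer.stochastic.minCostNeedBalance. The standalone Java sketch below illustrates that kind of weighted check; the exact aggregation and the class/variable names are assumptions made for the example, not the real StochasticLoadBalancer implementation.

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Illustrative sketch only: fold per-cost-function (multiplier, imbalance) pairs into a
    // weighted average and compare it with a minCostNeedBalance-style threshold.
    public class NeedsBalanceSketch {
      public static void main(String[] args) {
        // Hypothetical values echoing the functionCost line above: name -> {multiplier, imbalance}.
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costs.put("MoveCostFunction", new double[] {7.0, 0.0});
        costs.put("TableSkewCostFunction", new double[] {35.0, 0.0});
        double minCostNeedBalance = 1.0; // cf. hbase.master.balancer.stochastic.minCostNeedBalance

        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] c : costs.values()) {
          weightedSum += c[0] * c[1];   // multiplier * imbalance
          multiplierSum += c[0];
        }
        double weightedAverageImbalance = multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;

        if (weightedAverageImbalance <= minCostNeedBalance) {
          System.out.println("skipping load balancing because weighted average imbalance="
              + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
        } else {
          System.out.println("balancing needed, weighted average imbalance=" + weightedAverageImbalance);
        }
      }
    }

Lowering the threshold or raising a cost function's multiplier makes the check trip sooner, which is the tuning advice the log message itself gives.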
2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,123 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
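The BaseLoadBalancer(770) entries ("Start Generate Balance plan for table: table17", then table18) show the balancer being driven once per table, with a fresh cluster state (the Hosts/racks map and the server-to-host entries around this point) built for each table. A minimal standalone Java sketch of such a per-table driver loop follows; the types and method names are placeholders invented for the example, not HBase APIs.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    // Illustrative sketch only: invoke a table-level balancer once per table and
    // collect whatever plans it produces (possibly none, as for table17 above).
    public class PerTableBalanceSketch {
      // Placeholder types for the example.
      record RegionPlan(String region, String fromServer, String toServer) {}

      interface TableBalancer {
        List<RegionPlan> balanceTable(String table, Map<String, List<String>> regionsByServer);
      }

      static List<RegionPlan> balanceCluster(
          Map<String, Map<String, List<String>>> regionsByTableThenServer, TableBalancer balancer) {
        List<RegionPlan> plans = new ArrayList<>();
        for (Map.Entry<String, Map<String, List<String>>> e : regionsByTableThenServer.entrySet()) {
          System.out.println("Start Generate Balance plan for table: " + e.getKey());
          List<RegionPlan> tablePlans = balancer.balanceTable(e.getKey(), e.getValue());
          if (tablePlans != null) {
            plans.addAll(tablePlans);
          }
        }
        return plans;
      }
    }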
2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,127 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
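The INFO message above is the balancer's skip decision for table18: it compares a weighted average imbalance (0.0) against the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0) and, since the imbalance is not above the threshold, does not generate a balance plan. The sketch below is a minimal, hypothetical reading of that check, assuming the weighted average imbalance is Σ(multiplier × imbalance) / Σ(multiplier) over the cost functions that are "needed"; the class and method names are illustrative only, not HBase API. The multiplier and imbalance values are taken from the functionCost listing that continues directly below.

```java
// Hypothetical sketch (not HBase source): how the skip decision logged above can be read.
// Assumed formula: weighted average imbalance = sum(multiplier_i * imbalance_i) / sum(multiplier_i),
// compared against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run).
final class NeedsBalanceSketch {

    // Weighted average of per-cost-function imbalances (assumption, see lead-in).
    static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
        double weighted = 0.0;
        double totalWeight = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weighted += multipliers[i] * imbalances[i];
            totalWeight += multipliers[i];
        }
        return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances from the functionCost line below
        // (cost functions marked "not needed" are omitted):
        // RegionCountSkew=500, Move=7, RackLocality=15, TableSkew=35,
        // ReadRequest=5, WriteRequest=5, MemStoreSize=5, StoreFile=5, all with imbalance 0.0.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};

        double imbalance = weightedAverageImbalance(multipliers, imbalances);
        double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance

        // Matches the logged outcome: imbalance=0.0 <= threshold(1.0), so balancing is skipped.
        System.out.println("weighted average imbalance=" + imbalance
            + ", balance needed: " + (imbalance > minCostNeedBalance));
    }
}
```

As the log text itself suggests, balancing would only run for this table if the threshold were lowered or the relative multipliers raised enough to push the weighted average imbalance above it.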
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-11T04:23:31,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,132 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,134 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,136 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,138 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-11T04:23:31,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,143 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,146 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,149 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-11T04:23:31,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,154 DEBUG 
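
The StochasticLoadBalancer message above (weighted average imbalance=0.0 <= threshold(1.0)) names two tuning knobs: hbase.master.balancer.stochastic.minCostNeedBalance and the relative multipliers of the individual cost functions (e.g. RegionCountSkewCostFunction at multiplier=500.0). Below is a minimal sketch of how those keys could be set programmatically through the standard HBase Configuration API; the class name and the concrete values are illustrative only, and in a real deployment the same keys would normally go into hbase-site.xml on the master rather than being set in code.

    // Illustrative sketch only: shows the configuration keys referenced by the
    // balancer log message above. Values are examples, not recommendations.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Lower the minimum weighted-average imbalance required before the
        // balancer generates a plan (the log shows threshold(1.0) blocking it).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Alternatively, raise the relative weight of a specific cost function,
        // e.g. region count skew (reported as multiplier=500.0 in the log).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        // Echo the effective setting; 0.025f here is just a fallback default.
        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f));
      }
    }

Either change makes balancing more aggressive: lowering minCostNeedBalance reduces the imbalance needed to trigger a plan, while raising a multiplier increases that cost function's share of the weighted average reported in the functionCost line.
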
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,157 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,160 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
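The skip decision above compares a weighted average imbalance against hbase.master.balancer.stochastic.minCostNeedBalance (reported here as threshold(1.0)). The sketch below, written against the public Hadoop/HBase Configuration API, shows how an operator might lower that threshold and raise one cost multiplier, and how the weighted average in the message could be reproduced from the (multiplier, imbalance) pairs printed in the functionCost line that follows. The property names are taken from the log messages themselves; the concrete values (0.05f, 1000f) and the averaging formula are illustrative assumptions only, not something asserted by this test output.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdSketch {

  // Build a Configuration that makes the StochasticLoadBalancer act on smaller
  // imbalances. The values below are illustrative assumptions, not values used
  // in this test run.
  static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // The log reports the current threshold as 1.0; lowering it lets smaller
    // weighted imbalances trigger a balance plan.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Raising an individual multiplier (here: region count skew, shown as
    // multiplier=500.0 in the functionCost line) gives that cost more weight.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    return conf;
  }

  // Assumed formula for the "weighted average imbalance" in the log message:
  // sum(multiplier_i * imbalance_i) / sum(multiplier_i). With every imbalance
  // at 0.0, as in this run, the result is 0.0 and balancing is skipped.
  static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
    double weighted = 0.0;
    double total = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      total += multipliers[i];
    }
    return total == 0.0 ? 0.0 : weighted / total;
  }

  public static void main(String[] args) {
    // Multipliers copied from the functionCost line below; imbalances are all 0.0 there.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
    // Prints 0.0, matching "weighted average imbalance=0.0" in the log above.
    System.out.println("weighted average imbalance = "
        + weightedAverageImbalance(multipliers, imbalances));
  }
}

With a lower threshold, an imbalance of exactly 0.0 as reported here would still be skipped, but any non-zero weighted average above the new threshold would cause the balancer to generate a plan instead of logging this message.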
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-11T04:23:31,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,165 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,168 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,172 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
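The table52 decision logged above compares a weighted average imbalance against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here), based on the per-cost-function multipliers and imbalances listed in the functionCost breakdown that follows. The short, self-contained Java sketch below only replays that comparison with the numbers copied from this log; the class name BalanceThresholdSketch and the simple multiplier-weighted-average aggregation are illustrative assumptions, not HBase's actual StochasticLoadBalancer implementation.

```java
// Minimal sketch (assumptions noted below), NOT HBase code: replays the
// "weighted average imbalance <= threshold" check from the log for table52.
public class BalanceThresholdSketch {

  public static void main(String[] args) {
    // multiplier/imbalance pairs copied from the logged functionCost line;
    // cost functions reported as "(not needed)" are omitted here.
    double[][] costs = {
        {500.0, 0.0}, // RegionCountSkewCostFunction
        {7.0,   0.0}, // MoveCostFunction
        {15.0,  0.0}, // RackLocalityCostFunction
        {35.0,  0.0}, // TableSkewCostFunction
        {5.0,   0.0}, // ReadRequestCostFunction
        {5.0,   0.0}, // WriteRequestCostFunction
        {5.0,   0.0}, // MemStoreSizeCostFunction
        {5.0,   0.0}  // StoreFileCostFunction
    };

    // ASSUMPTION: a plain multiplier-weighted average; the exact aggregation
    // inside StochasticLoadBalancer is not shown in this log.
    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (double[] c : costs) {
      weightedSum += c[0] * c[1];
      multiplierSum += c[0];
    }
    double weightedAverageImbalance =
        multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;

    // Threshold named in the log: hbase.master.balancer.stochastic.minCostNeedBalance,
    // reported as 1.0 for this run.
    double minCostNeedBalance = 1.0;

    if (weightedAverageImbalance <= minCostNeedBalance) {
      System.out.println("skipping load balancing: weighted average imbalance="
          + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
    } else {
      System.out.println("would generate a balance plan");
    }
  }
}
```

With every imbalance at 0.0 the weighted average is 0.0 no matter how the multipliers are weighted, which is why the balancer skips table52 here; as the log message itself suggests, lowering hbase.master.balancer.stochastic.minCostNeedBalance (for example in hbase-site.xml) or raising the multiplier of a specific cost function is what would make the balancer act on smaller imbalances.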
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-11T04:23:31,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,177 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,180 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,181 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,183 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
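[Editorial note] The StochasticLoadBalancer(421) entry above is the one place in this stretch of log that explains a decision: balancing for table53 is skipped because the weighted average imbalance (0.0) does not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run), and the functionCost breakdown that continues below lists each cost function's multiplier and imbalance. As a minimal sketch, assuming the reported figure is simply the multiplier-weighted mean of those per-function imbalances (HBase's exact weighting may differ), the decision can be reproduced as follows; the class name and layout are invented for illustration only:

public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        // {multiplier, imbalance} pairs taken verbatim from the functionCost breakdown
        // below; the "(not needed)" cost functions are omitted.
        double[][] costs = {
            {500.0, 0.0}, // RegionCountSkewCostFunction
            {7.0,   0.0}, // MoveCostFunction
            {15.0,  0.0}, // RackLocalityCostFunction
            {35.0,  0.0}, // TableSkewCostFunction
            {5.0,   0.0}, // ReadRequestCostFunction
            {5.0,   0.0}, // WriteRequestCostFunction
            {5.0,   0.0}, // MemStoreSizeCostFunction
            {5.0,   0.0}  // StoreFileCostFunction
        };
        // Threshold in effect for this run, per the log message above.
        double minCostNeedBalance = 1.0;

        double weightedSum = 0.0, weightTotal = 0.0;
        for (double[] c : costs) {
            weightedSum += c[0] * c[1];
            weightTotal += c[0];
        }
        double weightedAverageImbalance = weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;

        // Mirrors the decision in the log line: balance only if the weighted
        // average imbalance exceeds the threshold.
        boolean skip = weightedAverageImbalance <= minCostNeedBalance;
        System.out.printf("weighted average imbalance=%.1f, threshold=%.1f, skip=%b%n",
                weightedAverageImbalance, minCostNeedBalance, skip);
    }
}

As the message itself suggests, lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising an individual cost function's multiplier makes the balancer act on smaller imbalances.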
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-11T04:23:31,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,188 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
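[Editorial note] The DEBUG dump that begins at BalancerClusterState(202) above prints the balancer's internal indexing: every server name is assigned a dense integer index ("Hosts are {srvX=idx, ...}"), and the (303)/(314) lines that follow record, per index, which host and rack each server maps to; in this test every server is its own host and all 393 share rack 0. Below is a minimal sketch of that bookkeeping, using four server names copied from the dump; the class and variable names are invented and this is not the actual BalancerClusterState code:

import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterStateIndexSketch {
    public static void main(String[] args) {
        // A few server names copied from the "Hosts are {...}" dump; the real run has 393.
        String[] servers = {"srv1003080932", "srv1003567750", "srv1003668183", "srv1011859460"};

        Map<String, Integer> serverIndex = new LinkedHashMap<>();
        int[] serverToHost = new int[servers.length];
        int[] serverToRack = new int[servers.length];

        for (int i = 0; i < servers.length; i++) {
            serverIndex.put(servers[i], i);
            serverToHost[i] = i; // one server per host in this topology
            serverToRack[i] = 0; // a single rack: "racks are {rack=0}"
        }

        // Mirrors the shape of the dump above: the name-to-index map, then the
        // per-index "server N is on host N" / "server N is on rack 0" lines.
        System.out.println("Hosts are " + serverIndex);
        for (int i = 0; i < servers.length; i++) {
            System.out.println("server " + i + " is on host " + serverToHost[i]
                    + ", and on rack " + serverToRack[i]);
        }
    }
}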
2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,191 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,195 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
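The DEBUG "server N is on host N" and INFO "server N is on rack 0" runs in this stretch are BalancerClusterState mapping every region server to a dense host index and rack index before any cost is computed; in this test every server is its own host and all 393 servers sit in the single rack "rack". The minimal sketch below shows that style of integer indexing. It is an illustration only: the class, field, and method names are assumptions made for this example, not HBase's actual BalancerClusterState internals.

import java.util.HashMap;
import java.util.Map;

// Illustrative sketch (hypothetical names), not HBase's BalancerClusterState:
// give each distinct host and rack a dense integer id so later cost functions
// can operate on int arrays instead of server names.
public class ClusterIndexSketch {

  // Returns the existing index for key, or assigns the next free index.
  private static int indexFor(Map<String, Integer> indexByName, String key) {
    Integer idx = indexByName.get(key);
    if (idx == null) {
      idx = indexByName.size();
      indexByName.put(key, idx);
    }
    return idx;
  }

  public static void main(String[] args) {
    // One host per server and a single rack, mirroring this test's topology.
    String[] serverHost = { "host-0", "host-1", "host-2" };
    String[] serverRack = { "rack", "rack", "rack" };

    Map<String, Integer> hostIndex = new HashMap<>();
    Map<String, Integer> rackIndex = new HashMap<>();
    int[] serverToHost = new int[serverHost.length];
    int[] serverToRack = new int[serverRack.length];

    for (int s = 0; s < serverHost.length; s++) {
      serverToHost[s] = indexFor(hostIndex, serverHost[s]);
      serverToRack[s] = indexFor(rackIndex, serverRack[s]);
      // Produces output shaped like the log lines around this point.
      System.out.println("server " + s + " is on host " + serverToHost[s]);
      System.out.println("server " + s + " is on rack " + serverToRack[s]);
    }
  }
}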
2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,198 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
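The INFO line from StochasticLoadBalancer above is the gate that decided to skip balancing: each cost function reports an imbalance, the balancer weights it by that function's multiplier (the multipliers appear in the functionCost breakdown that follows), and no plan is generated while the weighted result stays at or below hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run). Below is a minimal, hedged sketch of that decision and of lowering the threshold through the Hadoop Configuration API; the weighted-average formula is a simplification assumed for illustration, not the exact HBase computation.

import org.apache.hadoop.conf.Configuration;

// Simplified illustration of the skip/balance decision described in the log
// above; not the actual StochasticLoadBalancer implementation.
public class MinCostNeedBalanceSketch {

  // Weighted average of per-function imbalances, weighted by each multiplier.
  static double weightedImbalance(double[] multipliers, double[] imbalances) {
    double weighted = 0.0;
    double totalWeight = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      totalWeight += multipliers[i];
    }
    return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
  }

  public static void main(String[] args) {
    // Multipliers taken from the functionCost breakdown below; every imbalance
    // was 0.0 in this run, so the weighted result is 0.0.
    double[] multipliers = { 500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0 };
    double[] imbalances  = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };

    Configuration conf = new Configuration();
    // The test ran with the threshold at 1.0; a lower value (e.g. 0.025) makes
    // the balancer act on smaller imbalances.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f);
    float threshold = conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f);

    double imbalance = weightedImbalance(multipliers, imbalances);
    if (imbalance <= threshold) {
      System.out.println("skipping load balancing: imbalance=" + imbalance + " <= threshold=" + threshold);
    } else {
      System.out.println("generating balance plan: imbalance=" + imbalance + " > threshold=" + threshold);
    }
  }
}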
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,199 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] 
2024-11-11T04:23:31,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 54 is on host 54
[servers 55 through 391 follow the same pattern: one DEBUG record per server at BalancerClusterState(303), each placing server N on host N, timestamps 04:23:31,200 through 04:23:31,204]
2024-11-11T04:23:31,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392
2024-11-11T04:23:31,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
[servers 1 through 285 follow the same pattern: one INFO record per server at BalancerClusterState(314), each placing server N on rack 0, timestamps 04:23:31,204 through 04:23:31,208]
2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0
2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,209 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
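[Editor's note] The functionCost breakdown that follows lists each cost function's configured multiplier and its current imbalance; the message above compares a weighted average of those imbalances against hbase.master.balancer.stochastic.minCostNeedBalance. As a rough illustration only — assuming the weighted average is simply the multiplier-weighted mean of the per-function imbalances, and using hypothetical names such as CostTerm and weightedAverageImbalance rather than HBase's actual classes — the decision could be sketched like this:

```java
// Hypothetical sketch of the threshold check suggested by the log message above.
// CostTerm and weightedAverageImbalance are illustrative names, not HBase API.
import java.util.List;

public class BalanceDecisionSketch {

    /** One cost function's contribution: its configured multiplier and current imbalance. */
    record CostTerm(String name, double multiplier, double imbalance) {}

    /** Multiplier-weighted mean of the per-function imbalances (assumed formula). */
    static double weightedAverageImbalance(List<CostTerm> terms) {
        double weighted = 0.0, totalWeight = 0.0;
        for (CostTerm t : terms) {
            weighted += t.multiplier() * t.imbalance();
            totalWeight += t.multiplier();
        }
        return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
    }

    public static void main(String[] args) {
        // Values taken from the functionCost line below; functions reported as
        // "(not needed)" are omitted from this sketch.
        List<CostTerm> terms = List.of(
            new CostTerm("RegionCountSkewCostFunction", 500.0, 0.0),
            new CostTerm("MoveCostFunction", 7.0, 0.0),
            new CostTerm("RackLocalityCostFunction", 15.0, 0.0),
            new CostTerm("TableSkewCostFunction", 35.0, 0.0),
            new CostTerm("ReadRequestCostFunction", 5.0, 0.0),
            new CostTerm("WriteRequestCostFunction", 5.0, 0.0),
            new CostTerm("MemStoreSizeCostFunction", 5.0, 0.0),
            new CostTerm("StoreFileCostFunction", 5.0, 0.0));

        // hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run)
        double minCostNeedBalance = 1.0;

        double imbalance = weightedAverageImbalance(terms);
        boolean skipBalancing = imbalance <= minCostNeedBalance;
        System.out.printf("weighted average imbalance=%.1f, skip=%b%n", imbalance, skipBalancing);
    }
}
```

With every imbalance at 0.0 the weighted average is 0.0, so the plan is skipped. Under the same assumptions, lowering hbase.master.balancer.stochastic.minCostNeedBalance below the computed average, or raising the multiplier of a cost function whose imbalance is non-zero, is what would flip the decision, as the message hints.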
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-11T04:23:31,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,211 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,214 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,217 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,218 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,224 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
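The StochasticLoadBalancer(421) message above points at two tuning knobs: hbase.master.balancer.stochastic.minCostNeedBalance and the per-cost-function multipliers. Below is a minimal Java sketch of how those could be set on a Configuration before starting a master or mini-cluster; the 0.025 and 1000 values are purely illustrative, and hbase.master.balancer.stochastic.regionCountCost is assumed here to be the multiplier key backing RegionCountSkewCostFunction.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Lower the "do nothing" threshold so smaller weighted-average imbalances
    // still produce a balance plan (illustrative value).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f);
    // Give region-count skew more weight relative to the other cost functions
    // (assumed property name for RegionCountSkewCostFunction's multiplier).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    return conf;
  }
}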
2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,227 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
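The BalancerClusterState(202), (303) and (314) lines above show each server name being given a dense index and then mapped to a host index and a rack index; in this test every server is its own host and all 393 servers share rack 0. A rough, hypothetical sketch of that kind of dense indexing (not taken from the HBase source) follows, using a few placeholder server names:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ClusterIndexSketch {
  public static void main(String[] args) {
    // Placeholder sample; a real run has one entry per live RegionServer.
    List<String> servers = List.of("srv1515351537", "srv380239331", "srv1587732834");
    Map<String, Integer> hostToIndex = new HashMap<>();
    int serverIndex = 0;
    for (String server : servers) {
      // Each distinct hostname gets the next free index, mirroring "Hosts are {srv...=N, ...}".
      int host = hostToIndex.computeIfAbsent(server, h -> hostToIndex.size());
      // This test declares a single rack ("racks are {rack=0}"), so every server lands on rack 0.
      int rack = 0;
      System.out.println("server " + serverIndex + " is on host " + host + " and rack " + rack);
      serverIndex++;
    }
  }
}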
2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,230 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
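The long run of "server N is on rack 0" lines above is BalancerClusterState indexing every server against the rack it reports; with a single rack in this test, every server resolves to rack index 0. As a rough, hypothetical illustration of that kind of bookkeeping (invented names, not the actual HBase code), a dense array keyed by server index plus a name-to-index map is enough:

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch of the server -> rack index bookkeeping suggested by the
// "server N is on rack 0" log lines above. Class and variable names are invented
// for this sketch; they are not HBase APIs.
public class RackIndexSketch {
    public static void main(String[] args) {
        int numServers = 393;                 // 393 servers, per the summary line later in the log
        String[] rackOfServer = new String[numServers];
        for (int i = 0; i < numServers; i++) {
            rackOfServer[i] = "rack";         // every server reports the same rack name
        }

        // Assign each distinct rack name a dense integer index, mirroring "is on rack 0".
        Map<String, Integer> rackIndex = new HashMap<>();
        int[] serverToRack = new int[numServers];
        for (int i = 0; i < numServers; i++) {
            Integer idx = rackIndex.get(rackOfServer[i]);
            if (idx == null) {
                idx = rackIndex.size();       // next unused index
                rackIndex.put(rackOfServer[i], idx);
            }
            serverToRack[i] = idx;
        }
        // With one distinct rack, every entry of serverToRack is 0 and rackIndex.size() == 1,
        // matching "number of racks=1" in the summary further down.
        System.out.println("number of racks=" + rackIndex.size());
    }
}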
2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
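The skip message above names hbase.master.balancer.stochastic.minCostNeedBalance as the knob that gates balancing, and suggests raising individual cost multipliers as an alternative. As a hedged illustration only (the numeric values are arbitrary, not a recommendation), the threshold can be lowered through the standard HBase/Hadoop Configuration API before the balancer is set up, or the same keys can go into hbase-site.xml:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: adjust the minCostNeedBalance threshold mentioned in the log above so the
// StochasticLoadBalancer considers plans even at small imbalance values. 0.025 is an
// arbitrary illustrative value; the run above used 1.0.
public class MinCostNeedBalanceExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f);
        // Alternative route suggested by the message: raise a specific cost multiplier so its
        // imbalance weighs more. The key below is the commonly documented one for
        // RegionCountSkewCostFunction (multiplier=500.0 in the log); verify it against your
        // HBase version.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println("minCostNeedBalance=" +
            conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}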
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-11T04:23:31,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,235 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
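Referring back to the functionCost breakdown logged above (each cost function with a multiplier and a measured imbalance), one plausible reading of the "weighted average imbalance" that gets compared against the threshold is a multiplier-weighted mean over the functions that are in play. The sketch below works that out for the values printed in this run; it is an interpretation for illustration, not the verbatim StochasticLoadBalancer computation.

// Hypothetical sketch: derive a weighted average imbalance from the (multiplier, imbalance)
// pairs in the functionCost line above, normalized by the sum of the multipliers of the
// functions that are in play ("not needed" functions are left out, as in the log).
public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        double[][] costs = {
            {500.0, 0.0},  // RegionCountSkewCostFunction
            {7.0,   0.0},  // MoveCostFunction
            {15.0,  0.0},  // RackLocalityCostFunction
            {35.0,  0.0},  // TableSkewCostFunction
            {5.0,   0.0},  // ReadRequestCostFunction
            {5.0,   0.0},  // WriteRequestCostFunction
            {5.0,   0.0},  // MemStoreSizeCostFunction
            {5.0,   0.0},  // StoreFileCostFunction
        };
        double weighted = 0.0;
        double sumMultipliers = 0.0;
        for (double[] c : costs) {
            weighted += c[0] * c[1];
            sumMultipliers += c[0];
        }
        double weightedAverageImbalance = sumMultipliers == 0 ? 0 : weighted / sumMultipliers;
        double threshold = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance in this run
        System.out.printf("weighted average imbalance=%.1f <= threshold(%.1f): skip=%b%n",
            weightedAverageImbalance, threshold, weightedAverageImbalance <= threshold);
    }
}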
2024-11-11T04:23:31,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,238 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,242 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-11T04:23:31,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,247 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,250 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,251 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,253 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-11T04:23:31,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,258 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,261 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,262 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,264 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,267 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
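The balancer record above names two tuning knobs: lowering hbase.master.balancer.stochastic.minCostNeedBalance (that key appears verbatim in the message) or raising the multiplier of a specific cost function; the functionCost breakdown for this decision continues below. As a minimal sketch only, and not part of this test run, the snippet below shows how such properties could be set on an HBase Configuration. The key hbase.master.balancer.stochastic.regionCountCost is assumed here to control the RegionCountSkewCostFunction multiplier (the log reports multiplier=500.0 for that function), and the values are purely illustrative; in a real deployment these would normally be placed in hbase-site.xml on the master rather than set in code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Minimal sketch: make the stochastic balancer act on smaller imbalances.
    // Values are illustrative, not a recommendation.
    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Lower the "need balance" threshold; the log above shows the default of 1.0.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            // Assumed key for the RegionCountSkewCostFunction multiplier.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);
            System.out.println("minCostNeedBalance = "
                + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }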
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-11T04:23:31,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,269 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85
2024-11-11T04:23:31,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86
[servers 87 through 391 are reported identically at DEBUG level between 04:23:31,269 and 04:23:31,273 - balancer.BalancerClusterState(303): server N is on host N, i.e. every server on its own host]
2024-11-11T04:23:31,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392
2024-11-11T04:23:31,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
[servers 1 through 348 are reported identically at INFO level between 04:23:31,273 and 04:23:31,277 - balancer.BalancerClusterState(314): server N is on rack 0, i.e. all servers on the single rack 0]
2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0
2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
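[editor's note] The log entry above explains why balancing is skipped for table48 and names the tuning knob it refers to, hbase.master.balancer.stochastic.minCostNeedBalance. The following is a minimal, hypothetical Java sketch (not part of this log) of how that property could be lowered through the standard Hadoop/HBase Configuration API before the balancer runs; the 0.05f value is an arbitrary illustrative choice, not a recommendation, and any deployment would normally set it in hbase-site.xml instead.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerThresholdSketch {
      public static void main(String[] args) {
        // Start from the default HBase configuration (picks up hbase-site.xml if present).
        Configuration conf = HBaseConfiguration.create();

        // The log reports a threshold of 1.0; lowering it makes the StochasticLoadBalancer
        // act on smaller weighted-average imbalances instead of skipping the table.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Print the effective value back out (1.0f here is only the fallback default
        // used by getFloat when the key is unset).
        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
      }
    }

The alternative the log mentions, raising the relative multiplier of a specific cost function, would be done the same way with the corresponding multiplier property for that function (property names are assumptions here and should be checked against the HBase version in use).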
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-11T04:23:31,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,279 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,281 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,285 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-11T04:23:31,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,289 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
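The functionCost summary at the start of this balance-plan block (RegionCountSkewCostFunction multiplier=500.0, MoveCostFunction 7.0, RackLocalityCostFunction 15.0, TableSkewCostFunction 35.0, and 5.0 each for the read-request, write-request, memstore-size and store-file functions, all with imbalance=0.0) describes a multiplier-weighted cost model. As a rough illustration only, the sketch below shows how such per-function imbalance scores could be folded into one weighted total; the class and method names are invented for this example and are not HBase API.

// Illustrative sketch: weighted sum of per-function imbalance values,
// mirroring the "multiplier=..., imbalance=..." pairs in the functionCost line.
// WeightedCost and its members are hypothetical names, not HBase classes.
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedCost {
    private final Map<String, Double> multipliers = new LinkedHashMap<>();

    public WeightedCost() {
        multipliers.put("RegionCountSkewCostFunction", 500.0);
        multipliers.put("MoveCostFunction", 7.0);
        multipliers.put("RackLocalityCostFunction", 15.0);
        multipliers.put("TableSkewCostFunction", 35.0);
        multipliers.put("ReadRequestCostFunction", 5.0);
        multipliers.put("WriteRequestCostFunction", 5.0);
        multipliers.put("MemStoreSizeCostFunction", 5.0);
        multipliers.put("StoreFileCostFunction", 5.0);
    }

    // Weighted sum of imbalance values; with every imbalance at 0.0,
    // as reported in the log line above, the total comes out to 0.
    public double total(Map<String, Double> imbalance) {
        double sum = 0.0;
        for (Map.Entry<String, Double> e : multipliers.entrySet()) {
            sum += e.getValue() * imbalance.getOrDefault(e.getKey(), 0.0);
        }
        return sum;
    }
}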
2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,292 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
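The repeated "server N is on host N" (and, further down, "server N is on rack 0") lines record BalancerClusterState resolving each of the 393 region servers in table40's plan to a host index and a rack index; in this test every server is its own host and all servers share the single rack named "rack". A minimal sketch of that kind of indexing, assuming plain arrays and maps (the ClusterIndex name and its fields are illustrative, not the actual HBase fields):

// Illustrative sketch: assign each server a host index and a rack index,
// in the spirit of the BalancerClusterState log lines (hypothetical names).
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ClusterIndex {
    final int[] serverToHost;  // serverToHost[s] = host index of server s
    final int[] serverToRack;  // serverToRack[s] = rack index of server s

    ClusterIndex(List<String> serverHosts, Map<String, String> hostToRack) {
        serverToHost = new int[serverHosts.size()];
        serverToRack = new int[serverHosts.size()];
        Map<String, Integer> hostIdx = new HashMap<>();
        Map<String, Integer> rackIdx = new HashMap<>();
        for (int s = 0; s < serverHosts.size(); s++) {
            String host = serverHosts.get(s);
            String rack = hostToRack.getOrDefault(host, "rack");
            Integer h = hostIdx.get(host);
            if (h == null) { h = hostIdx.size(); hostIdx.put(host, h); }
            Integer r = rackIdx.get(rack);
            if (r == null) { r = rackIdx.size(); rackIdx.put(rack, r); }
            serverToHost[s] = h;
            serverToRack[s] = r;
        }
    }
}

With one server per host, serverToHost[s] works out equal to s, which is exactly the "server N is on host N" pattern in these lines, and every server resolves to rack index 0.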
2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,295 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
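The BalancerClusterState lines in this stretch of the log enumerate the cluster model the balancer works from: each server index is mapped to a rack index (here everything lands on rack 0), and the later "server N is on host N" lines map each server index to a host index (one server per host across 393 hosts). The following is a minimal sketch of that kind of index mapping, assuming a toy representation; the class and field names are illustrative only and are not HBase's actual BalancerClusterState internals.

// Illustrative sketch only: a toy index model in the spirit of the
// "server N is on rack R" / "server N is on host H" lines above and below.
// Names are assumptions, not the real org.apache.hadoop.hbase.master.balancer code.
public class ClusterTopologySketch {
    final int[] serverToHost;  // server index -> host index
    final int[] serverToRack;  // server index -> rack index

    ClusterTopologySketch(int numServers) {
        serverToHost = new int[numServers];
        serverToRack = new int[numServers];
        for (int s = 0; s < numServers; s++) {
            serverToHost[s] = s;  // one server per host, as in this test (hosts == servers)
            serverToRack[s] = 0;  // a single rack, matching "number of racks=1"
        }
    }

    public static void main(String[] args) {
        ClusterTopologySketch c = new ClusterTopologySketch(393); // 393 hosts, as logged
        System.out.println("server 392 is on host " + c.serverToHost[392]
            + ", rack " + c.serverToRack[392]);
    }
}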
2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,298 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-11T04:23:31,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,300 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
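The StochasticLoadBalancer(421) line for table40 above explains the skip decision: each cost function reports an imbalance, those imbalances are weighted by the multipliers shown on the functionCost= line, and balancing only proceeds when the weighted average exceeds the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0 here). Below is a rough sketch of that arithmetic, assuming a simple multiplier-weighted mean; the names and structure are illustrative and not the actual HBase implementation.

// Sketch of the "weighted average imbalance <= threshold" check suggested by the log
// line above. NOT the real StochasticLoadBalancer code; assumptions are noted inline.
public class NeedsBalanceSketch {
    static boolean needsBalance(double[] multipliers, double[] imbalances, double minCostNeedBalance) {
        double weighted = 0.0, totalWeight = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            if (multipliers[i] <= 0) continue;      // functions reported as "(not needed)" contribute nothing
            weighted += multipliers[i] * imbalances[i];
            totalWeight += multipliers[i];
        }
        double average = totalWeight == 0 ? 0.0 : weighted / totalWeight;
        return average > minCostNeedBalance;
    }

    public static void main(String[] args) {
        // Multipliers taken from the functionCost= line for table40; every imbalance was 0.0,
        // so the weighted average is 0.0 <= threshold(1.0) and plan generation is skipped.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};
        System.out.println("needsBalance = " + needsBalance(multipliers, imbalances, 1.0));
    }
}

With every imbalance at 0.0 the weighted average is 0.0, so the check fails and no balance plan is generated for table40; lowering minCostNeedBalance or raising the multiplier of a specific cost function shifts when this check passes, which is what the log message recommends for more aggressive balancing.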
2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,302 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,306 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,308 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-11T04:23:31,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,310 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
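The StochasticLoadBalancer(421) INFO record above (timestamp 04:23:31,309) explains why balancing for table41 was skipped and names hbase.master.balancer.stochastic.minCostNeedBalance as the threshold to lower for more aggressive balancing. Below is a minimal sketch of what lowering that threshold programmatically could look like, assuming the standard org.apache.hadoop.conf.Configuration / HBaseConfiguration API; the property name is taken verbatim from the log record, while the class name and the 0.05 value are illustrative assumptions, not something this test does.

```java
// Hypothetical sketch: lower the StochasticLoadBalancer activation threshold that the
// log record above reports as threshold(1.0). The property name comes from the log
// message; the 0.05 value is only an example.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MinCostNeedBalanceExample {
  public static void main(String[] args) {
    // Loads hbase-default.xml / hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    // With the threshold at 1.0 (as in this test run) a weighted imbalance of 0.0 never
    // triggers a plan; a smaller value makes the balancer act on smaller imbalances.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println("minCostNeedBalance = "
        + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}
```

In a deployed cluster the same property would normally be set in hbase-site.xml on the HMaster; the other option the message mentions, raising a specific cost function's multiplier, is typically also a configuration key under the hbase.master.balancer.stochastic.* prefix.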
2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,313 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,316 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,319 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
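The INFO record above names the two knobs that make the StochasticLoadBalancer act on smaller imbalances: hbase.master.balancer.stochastic.minCostNeedBalance and the per-cost-function multipliers. The Java sketch below is a minimal, hypothetical illustration of setting those keys on a Hadoop Configuration; it is not part of the test run logged here, and the regionCountCost key name is assumed from common HBase defaults rather than taken from this log. The functionCost breakdown belonging to the same log record continues right after the sketch.

import org.apache.hadoop.conf.Configuration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    // In a real deployment these keys would live in hbase-site.xml and be read via
    // HBaseConfiguration.create(); a plain Configuration is enough for this sketch.
    Configuration conf = new Configuration();

    // Knob 1 (named verbatim in the log record above): lower the minimum cost at which
    // the balancer considers the cluster in need of balancing, from its 1.0 default.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Knob 2 (key name assumed, not from this log): raise the multiplier of a specific
    // cost function, e.g. region count skew, whose multiplier shows as 500.0 above.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);

    System.out.println("minCostNeedBalance = "
        + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}

Note that in the run logged here the weighted average imbalance is already 0.0, so the skip would happen at any threshold; the knobs only change behaviour when there is residual imbalance below the default 1.0 cutoff.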
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-11T04:23:31,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,321 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,323 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,326 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,330 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
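Note on the decision above: the balancer reports a weighted average imbalance of 0.0 against the minCostNeedBalance threshold of 1.0 and therefore skips balancing for table43; the functionCost breakdown that continues directly below lists each active cost function's multiplier and measured imbalance. The sketch here is illustrative only: the class and method names are hypothetical, the weighted-average formula is inferred from the numbers the message reports rather than copied from the HBase source, and the only configuration key used is the one quoted in the log message itself.

```java
// Illustrative sketch (not HBase source): reproducing the "skip balancing" arithmetic
// from the reported per-cost-function numbers, and the configuration change the log
// message suggests for more aggressive balancing.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdSketch {

  /** Weighted average as reported in the log: sum(multiplier_i * imbalance_i) / sum(multiplier_i). */
  static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
    double total = 0.0;
    double sumMultiplier = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      total += multipliers[i] * imbalances[i];
      sumMultiplier += multipliers[i];
    }
    return sumMultiplier == 0.0 ? 0.0 : total / sumMultiplier;
  }

  public static void main(String[] args) {
    // Multipliers of the cost functions marked active in the functionCost line below
    // (RegionCountSkew, Move, RackLocality, TableSkew, ReadRequest, WriteRequest,
    // MemStoreSize, StoreFile); every reported imbalance is 0.0.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
    System.out.println(weightedAverageImbalance(multipliers, imbalances)); // 0.0 <= threshold(1.0) -> skip

    // As the message suggests, lowering the threshold makes balancing more aggressive
    // (0.05f is just an example value, not a recommended default).
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
  }
}
```

With every imbalance at 0.0 the weighted average is 0.0, so no plan is produced for table43 and the run proceeds to "Start Generate Balance plan for table: table44" below.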
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-11T04:23:31,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,331 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,334 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,335 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,337 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,340 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
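[Editor's sketch] The message above skips balancing for table44 because the weighted average imbalance (0.0) does not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0); the functionCost= breakdown that follows lists each cost function's multiplier and imbalance. The snippet below is a minimal, illustrative sketch only, assuming the reported value is simply the multiplier-weighted mean of the per-function imbalances (class and variable names are made up for illustration, not HBase's actual API):

    // Illustrative sketch, not HBase code: recomputes the "weighted average imbalance"
    // from the multiplier/imbalance pairs in the functionCost= breakdown below.
    // Cost functions reported as "(not needed)" are skipped.
    public class WeightedImbalanceSketch {
        public static void main(String[] args) {
            double[][] costs = {
                {500.0, 0.0}, // RegionCountSkewCostFunction
                {7.0, 0.0},   // MoveCostFunction
                {15.0, 0.0},  // RackLocalityCostFunction
                {35.0, 0.0},  // TableSkewCostFunction
                {5.0, 0.0},   // ReadRequestCostFunction
                {5.0, 0.0},   // WriteRequestCostFunction
                {5.0, 0.0},   // MemStoreSizeCostFunction
                {5.0, 0.0}    // StoreFileCostFunction
            };
            double weighted = 0.0, totalWeight = 0.0;
            for (double[] c : costs) {
                weighted += c[0] * c[1];   // multiplier * imbalance
                totalWeight += c[0];
            }
            double weightedAverageImbalance = weighted / totalWeight; // 0.0 for table44
            double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
            // A balance plan is only generated when the imbalance exceeds the threshold,
            // which is why the log reports "skipping load balancing" here.
            System.out.println("imbalance=" + weightedAverageImbalance
                + " balanceNeeded=" + (weightedAverageImbalance > minCostNeedBalance));
        }
    }

Under that assumption, lowering minCostNeedBalance or raising an individual multiplier makes the threshold easier to exceed, which is exactly the tuning advice the log message gives.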
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-11T04:23:31,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,341 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,344 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,345 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,347 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,349 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
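The log message above names the tuning knobs that decide whether the StochasticLoadBalancer produces a plan at all. As a minimal, illustrative sketch only (not part of this test run): hbase.master.balancer.stochastic.minCostNeedBalance is the property quoted verbatim in the message, while hbase.master.balancer.stochastic.regionCountCost is an assumed multiplier key for RegionCountSkewCostFunction, chosen because its stock multiplier of 500 matches the functionCost line that follows. Values here are arbitrary examples, not recommended defaults.

// Hypothetical tuning sketch; assumes hbase-common on the classpath.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Property named directly in the log message: lowering it makes the balancer
    // act on smaller weighted-average imbalances (this test runs with 1.0, so an
    // imbalance of 0.0 never triggers a plan).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Assumed key for the RegionCountSkewCostFunction multiplier; raising it weights
    // region-count skew more heavily relative to the other cost functions listed below.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}

A configuration like this would normally be applied through hbase-site.xml on the master rather than in code; the Java form is used here only to keep the example self-contained.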
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-11T04:23:31,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,351 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,354 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,357 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
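
[Editor's aside — illustrative sketch, not part of the test log and not the actual org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer source.] The skip message above reports a "weighted average imbalance" compared against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here), and the functionCost= breakdown that follows lists each cost function's multiplier and imbalance. The sketch below shows one plausible reading of that check — a multiplier-weighted average of the per-function imbalances, ignoring functions marked "not needed" — populated with the values logged for table35. The class name WeightedImbalanceCheckSketch, the CostFunction record, and the averaging formula are assumptions for illustration only.

// Illustrative sketch only (assumed behavior, not the real StochasticLoadBalancer code).
public class WeightedImbalanceCheckSketch {

  // One (multiplier, imbalance) pair per cost function, as reported in the functionCost= line.
  record CostFunction(String name, double multiplier, double imbalance, boolean needed) {}

  // Multiplier-weighted average of per-function imbalances; "not needed" functions are skipped.
  static double weightedAverageImbalance(CostFunction[] functions) {
    double weightedSum = 0.0, weightTotal = 0.0;
    for (CostFunction f : functions) {
      if (!f.needed()) continue;
      weightedSum += f.multiplier() * f.imbalance();
      weightTotal += f.multiplier();
    }
    return weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;
  }

  public static void main(String[] args) {
    // Values taken from the functionCost= breakdown logged for table35.
    CostFunction[] functions = {
      new CostFunction("RegionCountSkewCostFunction", 500.0, 0.0, true),
      new CostFunction("PrimaryRegionCountSkewCostFunction", 0.0, 0.0, false), // "not needed"
      new CostFunction("MoveCostFunction", 7.0, 0.0, true),
      new CostFunction("RackLocalityCostFunction", 15.0, 0.0, true),
      new CostFunction("TableSkewCostFunction", 35.0, 0.0, true),
      new CostFunction("RegionReplicaHostCostFunction", 0.0, 0.0, false),      // "not needed"
      new CostFunction("RegionReplicaRackCostFunction", 0.0, 0.0, false),      // "not needed"
      new CostFunction("ReadRequestCostFunction", 5.0, 0.0, true),
      new CostFunction("WriteRequestCostFunction", 5.0, 0.0, true),
      new CostFunction("MemStoreSizeCostFunction", 5.0, 0.0, true),
      new CostFunction("StoreFileCostFunction", 5.0, 0.0, true)
    };
    double minCostNeedBalance = 1.0; // threshold named in the log message
    double imbalance = weightedAverageImbalance(functions);
    if (imbalance <= minCostNeedBalance) {
      System.out.println("skipping load balancing: imbalance=" + imbalance
          + " <= threshold(" + minCostNeedBalance + ")");
    } else {
      System.out.println("imbalance exceeds threshold; a balance plan would be generated");
    }
  }
}

With every logged imbalance at 0.0, any multiplier-weighted average is 0.0, which is <= 1.0 — consistent with the balancer's decision above to skip table35 and move on to the next table's plan. [End of aside; the log continues below.]
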
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-11T04:23:31,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,361 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,364 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,367 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
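The skip message above names the two knobs that control when the stochastic balancer acts: the overall threshold hbase.master.balancer.stochastic.minCostNeedBalance (reported as 1.0 in this test run) and the per-cost-function multipliers listed in the functionCost line that follows. Because every per-function imbalance is reported as 0.0 here, any weighted average of them is 0.0, so table36 is skipped regardless of the multipliers. As a minimal sketch only, assuming the property name quoted in the log message and the standard hbase-site.xml format, and assuming hbase.master.balancer.stochastic.regionCountCost is the multiplier key behind RegionCountSkewCostFunction (its default of 500.0 matches the functionCost line), more aggressive balancing could be configured like this; the values are illustrative, not recommendations:

  <!-- hbase-site.xml sketch: make the stochastic balancer more willing to produce a plan -->
  <property>
    <!-- property name taken from the log message above; 1.0 is the threshold in this test run -->
    <name>hbase.master.balancer.stochastic.minCostNeedBalance</name>
    <value>0.05</value>
  </property>
  <property>
    <!-- assumed multiplier key for RegionCountSkewCostFunction (default 500.0 per the functionCost line) -->
    <name>hbase.master.balancer.stochastic.regionCountCost</name>
    <value>1000</value>
  </property>

These properties are read by the master, so in a real deployment they belong in the master's hbase-site.xml (or the test's Configuration) rather than on clients.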
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-11T04:23:31,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,371 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,374 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,377 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
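
The StochasticLoadBalancer message above reports that balancing for table37 is skipped while the weighted average imbalance stays at or below hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here), and suggests lowering that threshold or raising individual cost-function multipliers for more aggressive balancing. A minimal sketch of lowering the threshold programmatically follows; only the property name and its 1.0 default are taken from the log line above, while the class name and chosen value are illustrative assumptions, not part of this test run.

// Hedged sketch (not from the log): lower the stochastic balancer's
// "needs balance" threshold so plans are generated for smaller imbalances.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Default is 1.0 per the log message; a lower value makes the balancer
    // consider the cluster imbalanced sooner. 0.05 is an arbitrary example.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}

The same key can instead be set in hbase-site.xml; either way it only takes effect where the master's balancer reads its configuration.
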
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-11T04:23:31,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,381 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,384 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,385 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,387 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0
2024-11-11T04:23:31,387..390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): servers 161 through 392 are on rack 0
2024-11-11T04:23:31,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
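For context on the decision logged above: the balancer weighs the per-cost-function imbalances (listed in the functionCost breakdown that follows) by their multipliers and skips balancing when the resulting weighted average does not exceed the minCostNeedBalance threshold. The sketch below is a minimal, illustrative reconstruction of that check, not the actual StochasticLoadBalancer source; the class and method names are hypothetical, and the formula (sum of multiplier x imbalance divided by the sum of multipliers) is an assumption inferred from the values this log line reports.

```java
// Illustrative sketch only -- not HBase code. Assumes the "weighted average
// imbalance" reported by StochasticLoadBalancer(421) is
//   sum(multiplier_i * imbalance_i) / sum(multiplier_i)
// and is compared against hbase.master.balancer.stochastic.minCostNeedBalance.
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceCheck {            // hypothetical class name
  static double weightedAverageImbalance(Map<String, double[]> costs) {
    double weighted = 0.0, sumMultiplier = 0.0;
    for (double[] mi : costs.values()) {         // mi[0] = multiplier, mi[1] = imbalance
      weighted += mi[0] * mi[1];
      sumMultiplier += mi[0];
    }
    return sumMultiplier == 0.0 ? 0.0 : weighted / sumMultiplier;
  }

  public static void main(String[] args) {
    // Multipliers and imbalances exactly as reported for table38 above;
    // cost functions reported as "(not needed)" are omitted.
    Map<String, double[]> costs = new LinkedHashMap<>();
    costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    costs.put("MoveCostFunction",            new double[] {7.0,   0.0});
    costs.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
    costs.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
    costs.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
    costs.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
    costs.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
    costs.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

    double minCostNeedBalance = 1.0;             // default threshold named in the log message
    double avg = weightedAverageImbalance(costs);
    System.out.printf("weighted average imbalance=%.1f threshold=%.1f -> %s%n",
        avg, minCostNeedBalance,
        avg <= minCostNeedBalance ? "skip balancing" : "generate balance plan");
  }
}
```

As the log message itself suggests, a lower hbase.master.balancer.stochastic.minCostNeedBalance or a higher multiplier on a specific cost function makes this check more likely to trigger a balance plan.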
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-11T04:23:31,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,391 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23
2024-11-11T04:23:31,391..395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): servers 24 through 330 are each on the host with the same index
2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,395 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,397 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
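The INFO entries in this stretch place every server on rack 0, and the balancer later summarizes the topology as 393 hosts on a single rack. A minimal sketch, assuming nothing beyond the values visible in the log (393 servers, one host per server, one rack), of how that summary can be derived by de-duplicating the per-server assignments; ClusterSummary is a hypothetical name, not HBase code:

```java
import java.util.Arrays;
import java.util.stream.IntStream;

// Illustrative only: reproduces the kind of summary logged after the per-server
// host/rack assignment ("number of hosts=393, number of racks=1").
public class ClusterSummary {
    public static void main(String[] args) {
        int numServers = 393;

        // One distinct host per server, as in the "server N is on host N" entries.
        int[] serverToHost = IntStream.range(0, numServers).toArray();

        // Every server on rack 0, as in the "server N is on rack 0" entries.
        int[] serverToRack = new int[numServers]; // all zeros

        long numHosts = Arrays.stream(serverToHost).distinct().count();
        long numRacks = Arrays.stream(serverToRack).distinct().count();

        System.out.printf("Number of tables=%d, number of hosts=%d, number of racks=%d%n",
            1, numHosts, numRacks);
    }
}
```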
2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
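The INFO entry above explains why balancing is skipped: every cost function in the breakdown that follows reports imbalance=0.0, so the weighted average imbalance (0.0) cannot exceed the minCostNeedBalance threshold of 1.0, and the message points at hbase.master.balancer.stochastic.minCostNeedBalance as the knob to lower for more aggressive balancing. A minimal sketch of adjusting that property through the standard Hadoop/HBase Configuration API; the property key is taken from the log itself, while the example value 0.05 and the class name BalancerThresholdExample are assumptions for illustration, not recommendations:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: lowering the minCostNeedBalance threshold named in the log
// entry above so the stochastic balancer acts on smaller imbalances.
public class BalancerThresholdExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // With the test's threshold of 1.0, a weighted imbalance of 0.0 never triggers a plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}
```

The log's other suggestion, raising the relative multiplier of a specific cost function, would be set through the same Configuration API, but since the corresponding property names are not shown in this log they are not guessed at here.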
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-11T04:23:31,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,401 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,404 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,407 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,410 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
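The message above says the StochasticLoadBalancer skipped rebalancing table5 because the weighted average of the per-cost-function imbalances did not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here); the functionCost breakdown that follows lists each cost function's multiplier and imbalance. Below is a minimal Java sketch of that check, assuming the sum(multiplier * imbalance) / sum(multiplier) form suggested by the log wording (the actual StochasticLoadBalancer computation may differ), populated with the multipliers reported in the breakdown:

// Minimal sketch, not HBase's actual implementation: recompute the
// "weighted average imbalance" implied by the log message and compare it
// against hbase.master.balancer.stochastic.minCostNeedBalance.
public class MinCostNeedBalanceSketch {
    // (multiplier, imbalance) pairs as reported in the functionCost breakdown below
    static final double[][] COSTS = {
        {500.0, 0.0}, // RegionCountSkewCostFunction
        {7.0,   0.0}, // MoveCostFunction
        {15.0,  0.0}, // RackLocalityCostFunction
        {35.0,  0.0}, // TableSkewCostFunction
        {5.0,   0.0}, // ReadRequestCostFunction
        {5.0,   0.0}, // WriteRequestCostFunction
        {5.0,   0.0}, // MemStoreSizeCostFunction
        {5.0,   0.0}, // StoreFileCostFunction
    };

    // Weighted average: sum(multiplier * imbalance) / sum(multiplier).
    static double weightedAverageImbalance(double[][] costs) {
        double weighted = 0.0;
        double totalWeight = 0.0;
        for (double[] c : costs) {
            weighted += c[0] * c[1];
            totalWeight += c[0];
        }
        return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
    }

    public static void main(String[] args) {
        // hbase.master.balancer.stochastic.minCostNeedBalance, 1.0 in this run
        double threshold = 1.0;
        double imbalance = weightedAverageImbalance(COSTS);
        System.out.printf("weighted average imbalance=%.1f <= threshold(%.1f) -> %s%n",
            imbalance, threshold,
            imbalance <= threshold ? "skip balancing" : "generate balance plan");
    }
}

As the log message itself suggests, an operator who wants this table balanced more aggressively would either lower hbase.master.balancer.stochastic.minCostNeedBalance (for example in hbase-site.xml) or raise the multiplier of the specific cost function that matters to them, so the weighted average crosses the threshold.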
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-11T04:23:31,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,411 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,412 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54
[identical DEBUG records from balancer.BalancerClusterState(303) continue in the same form, "server N is on host N", for every server N from 55 through 391; timestamps run from 2024-11-11T04:23:31,412 to 2024-11-11T04:23:31,415]
2024-11-11T04:23:31,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392
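The host-assignment pass above shows BalancerClusterState indexing each of the 393 servers (0 through 392) onto its own host, and the rack-assignment pass that follows places every server visible here on rack 0. As a rough illustration of the shape of that mapping, a minimal, hypothetical Java sketch is shown below; the array names and class are assumptions for illustration only, not the actual HBase BalancerClusterState fields.

```java
// Illustrative sketch only: builds server->host and server->rack index arrays
// analogous to what the log above reports (one host per server, a single rack 0).
// Names such as serverIndexToHostIndex are assumptions, not HBase's real fields.
public class ClusterIndexSketch {
    public static void main(String[] args) {
        int numServers = 393;                        // servers 0..392, as logged
        int[] serverIndexToHostIndex = new int[numServers];
        int[] serverIndexToRackIndex = new int[numServers];

        for (int server = 0; server < numServers; server++) {
            serverIndexToHostIndex[server] = server; // "server N is on host N"
            serverIndexToRackIndex[server] = 0;      // "server N is on rack 0"
        }

        System.out.println("server 54 -> host " + serverIndexToHostIndex[54]
            + ", rack " + serverIndexToRackIndex[54]);
    }
}
```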
2024-11-11T04:23:31,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
[identical INFO records from balancer.BalancerClusterState(314) continue in the same form, "server N is on rack 0", for every server N from 1 through 316; timestamps run from 2024-11-11T04:23:31,415 to 2024-11-11T04:23:31,419]
2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0
2024-11-11T04:23:31,419 INFO [Time-limited test {}]
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,420 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-11T04:23:31,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,421 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,424 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,425 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,427 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,431 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
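
[Editor's note] The StochasticLoadBalancer INFO entry logged at 04:23:31,430 above explains why balancing was skipped for table3 (weighted average imbalance 0.0 <= threshold 1.0) and names hbase.master.balancer.stochastic.minCostNeedBalance as the knob to lower for more aggressive balancing. As a minimal sketch of acting on that hint, assuming only the property key the log itself names, the value could be overridden through the standard Hadoop Configuration API (in a real deployment it would normally be set in hbase-site.xml); the chosen value 0.05 is purely illustrative:

    // Minimal sketch: lowering the stochastic balancer's balance threshold.
    // The property key is taken verbatim from the log message above; the value
    // 0.05 is an illustrative assumption, not a recommendation.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerThresholdSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // The log reports threshold(1.0); a smaller value makes the balancer
        // act on correspondingly smaller weighted-average imbalances.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
      }
    }

The log's other suggestion, raising the relative multiplier of a specific cost function (e.g. the RegionCountSkewCostFunction shown with multiplier=500.0), would be set the same way, but since the exact property keys for those multipliers are not shown in this log they are not guessed at here.
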
2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,434 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
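
[Editor's note] The DEBUG lines in this stretch record the cluster model being built for the table2 plan: 393 distinct server keys in the hosts map, each server index mapped to its own host index, and a single rack ({rack=0}). The toy sketch below, which is not HBase's actual BalancerClusterState code, only illustrates that indexing under those assumptions: with unique hostnames the server-to-host mapping is the identity, and with one rack every server maps to rack 0, which is why the later INFO lines report "server N is on rack 0" for every N.

    // Toy sketch (not HBase's BalancerClusterState implementation) of the
    // server -> host and server -> rack indexing described by the DEBUG lines.
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class ClusterIndexSketch {
      public static void main(String[] args) {
        int numServers = 393;                      // matches "number of hosts=393" logged earlier
        Map<String, Integer> hostToIndex = new LinkedHashMap<>();
        int[] serverToHost = new int[numServers];
        int[] serverToRack = new int[numServers];  // single rack, so every entry stays 0

        for (int server = 0; server < numServers; server++) {
          String hostname = "srv" + server;        // hypothetical names; the log uses keys like srv1515351537
          // First time a hostname is seen it gets the next free host index; with
          // unique hostnames this makes serverToHost the identity mapping.
          serverToHost[server] = hostToIndex.computeIfAbsent(hostname, h -> hostToIndex.size());
          serverToRack[server] = 0;                // "racks are {rack=0}"
        }
        System.out.println("server 42 is on host " + serverToHost[42]
            + ", rack " + serverToRack[42]);       // mirrors the "server N is on host N" DEBUG lines
      }
    }
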
2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,437 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
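The balancer message above skips balancing because a weighted-average imbalance (0.0) is not above hbase.master.balancer.stochastic.minCostNeedBalance (1.0), and it points at the per-function multipliers and imbalances reported in the functionCost detail on the next line. The following is a minimal, self-contained Java sketch of that decision only, assuming the weighted average is the multiplier-weighted mean of the per-function imbalances; the class, record, and method names here are hypothetical illustrations and are not HBase's actual StochasticLoadBalancer internals.

```java
// Illustrative sketch of the "skip balancing" decision reported in the log.
// Hypothetical names; NOT the real org.apache.hadoop.hbase.master.balancer API.
public class BalanceDecisionSketch {

    /** One (name, multiplier, imbalance) triple, mirroring a functionCost entry. */
    record CostFunction(String name, double multiplier, double imbalance) {}

    /** Assumed formula: sum(multiplier * imbalance) / sum(multiplier). */
    static double weightedAverageImbalance(CostFunction[] costs) {
        double weightedSum = 0.0;
        double totalWeight = 0.0;
        for (CostFunction c : costs) {
            weightedSum += c.multiplier() * c.imbalance();
            totalWeight += c.multiplier();
        }
        return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances copied from the functionCost line in the log;
        // the "(not needed)" functions are omitted here.
        CostFunction[] costs = {
            new CostFunction("RegionCountSkewCostFunction", 500.0, 0.0),
            new CostFunction("MoveCostFunction", 7.0, 0.0),
            new CostFunction("RackLocalityCostFunction", 15.0, 0.0),
            new CostFunction("TableSkewCostFunction", 35.0, 0.0),
            new CostFunction("ReadRequestCostFunction", 5.0, 0.0),
            new CostFunction("WriteRequestCostFunction", 5.0, 0.0),
            new CostFunction("MemStoreSizeCostFunction", 5.0, 0.0),
            new CostFunction("StoreFileCostFunction", 5.0, 0.0),
        };

        // Threshold named in the log message:
        // hbase.master.balancer.stochastic.minCostNeedBalance, default 1.0.
        double minCostNeedBalance = 1.0;

        double imbalance = weightedAverageImbalance(costs);
        if (imbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing: imbalance=%.1f <= threshold(%.1f)%n",
                    imbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}
```

With every per-function imbalance at 0.0 the weighted average is 0.0, so the check comes out as skip, matching the log. For more aggressive balancing, the message itself suggests lowering hbase.master.balancer.stochastic.minCostNeedBalance below 1.0 or raising the multiplier of the specific cost function of interest in the cluster configuration.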
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-11T04:23:31,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,441 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,444 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,445 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,447 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,449 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-11T04:23:31,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,451 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
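The StochasticLoadBalancer(421) message above names the two knobs behind this decision: the minCostNeedBalance threshold it is compared against, and the per-cost-function multipliers listed in the functionCost line. A minimal sketch of setting them through the Hadoop Configuration API, assuming hbase-common on the classpath; hbase.master.balancer.stochastic.minCostNeedBalance is quoted verbatim in the log, while hbase.master.balancer.stochastic.regionCountCost is assumed here to be the multiplier key for RegionCountSkewCostFunction (its 500.0 default matches the functionCost line):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Every cost function above reports imbalance=0.0, so any multiplier-weighted
        // average is 0.0 and stays below the 1.0 threshold, which is why no plan is
        // generated for table1. Lowering the threshold makes the balancer act on
        // smaller imbalances.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Alternatively, raise the relative weight of one cost function
        // (assumed key for RegionCountSkewCostFunction's multiplier).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println(
            conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
      }
    }
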
2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,453 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
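The "Hosts are {srv...=97, ...} racks are {rack=0}" dump and the long "server N is on host N" run reflect the same bookkeeping: each server, host, and rack name is assigned a dense integer index, and with 393 single-server hosts on one rack the mapping is the identity. The sketch below reproduces that indexing pattern with plain Java collections; the class and variable names are illustrative and not BalancerClusterState's actual internals.

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class DenseIndexSketch {
      public static void main(String[] args) {
        // A few server names taken from the dump above; the real test uses 393.
        List<String> hosts = List.of("srv1515351537", "srv380239331", "srv1587732834");
        Map<String, Integer> hostToIndex = new LinkedHashMap<>();
        for (String host : hosts) {
          // The first time a host is seen it gets the next free index, so every
          // host ends up mapped to a unique small integer, as in the dump.
          if (!hostToIndex.containsKey(host)) {
            hostToIndex.put(host, hostToIndex.size());
          }
        }
        // With one server per host and a single rack, server i trivially sits on
        // host i and rack 0, matching the "server N is on host N ... rack 0" lines.
        System.out.println("Hosts are " + hostToIndex + " racks are {rack=0}");
      }
    }

Dense indices like these typically let a balancer keep per-server and per-host state in flat arrays instead of name lookups, which matters when iterating over hundreds of servers.
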
2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,457 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
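[editor's note] The long run of "server N is on host N" / "server N is on rack 0" entries above is BalancerClusterState recording, for every server index, which host index and rack index it belongs to before the balance run. As a rough illustrative sketch only (plain Java, not the HBase class; the array names and counting logic are invented for the sketch), the bookkeeping these lines describe amounts to:

```java
import java.util.Arrays;

// Illustrative sketch of the topology bookkeeping implied by the
// "server N is on host N" / "server N is on rack 0" lines above.
// Not the HBase BalancerClusterState implementation; names are invented.
public class ClusterTopologySketch {
  public static void main(String[] args) {
    int numServers = 393;                      // servers 0..392, as logged
    int[] serverToHost = new int[numServers];  // each server on its own host in this test
    int[] serverToRack = new int[numServers];  // all hosts in the single rack 0

    for (int server = 0; server < numServers; server++) {
      serverToHost[server] = server;  // "server N is on host N"
      serverToRack[server] = 0;       // "server N is on rack 0"
    }

    long numHosts = Arrays.stream(serverToHost).distinct().count();
    long numRacks = Arrays.stream(serverToRack).distinct().count();
    // Matches the summary logged below: number of hosts=393, number of racks=1
    System.out.println("hosts=" + numHosts + ", racks=" + numRacks);
  }
}
```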
2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
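[editor's note] The "skipping load balancing" message above compares a weighted average imbalance against hbase.master.balancer.stochastic.minCostNeedBalance (set to 1.0 in this run; the property name is taken from the log itself). As a minimal sketch, assuming the weighted average is the multiplier-weighted mean of the per-cost-function imbalances reported in the functionCost breakdown that follows (the exact formula inside HBase may differ), the threshold check looks roughly like:

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch, not HBase source: combines the per-cost-function
// imbalances into a weighted average and compares it against the
// minCostNeedBalance threshold, mirroring the skip decision logged above.
// Functions reported "(not needed)" in the log are omitted.
public class MinCostNeedBalanceSketch {
  public static void main(String[] args) {
    // {multiplier, imbalance} pairs as reported in the functionCost line below.
    Map<String, double[]> costFunctions = new LinkedHashMap<>();
    costFunctions.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    costFunctions.put("MoveCostFunction",            new double[] {7.0,   0.0});
    costFunctions.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
    costFunctions.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
    costFunctions.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
    costFunctions.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
    costFunctions.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
    costFunctions.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (double[] mi : costFunctions.values()) {
      weightedSum += mi[0] * mi[1];   // multiplier * imbalance
      multiplierSum += mi[0];
    }
    double weightedAverageImbalance =
        multiplierSum == 0 ? 0 : weightedSum / multiplierSum;

    // Threshold used by this test run (1.0, per the log).
    double minCostNeedBalance = 1.0;

    if (weightedAverageImbalance <= minCostNeedBalance) {
      System.out.println("skipping load balancing: "
          + weightedAverageImbalance + " <= " + minCostNeedBalance);
    } else {
      System.out.println("would generate a balance plan");
    }
  }
}
```

As the message itself suggests, lowering hbase.master.balancer.stochastic.minCostNeedBalance (or raising the multiplier property of the specific cost function you care about) makes the balancer more willing to emit a plan.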
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-11T04:23:31,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,461 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
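[Editor's note] The functionCost record above enumerates each balancer cost function with its configured multiplier and the imbalance it currently reports. As an illustrative sketch only (this is not the actual HBase StochasticLoadBalancer code; the class and method names below are hypothetical), a weighted overall cost could be combined from such (multiplier, imbalance) pairs like this:

```java
import java.util.LinkedHashMap;
import java.util.Map;

/** Hypothetical illustration of combining per-function costs into one score. */
public class WeightedCostSketch {

    /** Sum of multiplier * imbalance, normalized by the sum of multipliers. */
    static double overallCost(Map<String, double[]> functions) {
        double weighted = 0.0;
        double totalMultiplier = 0.0;
        for (double[] pair : functions.values()) {
            double multiplier = pair[0];
            double imbalance = pair[1];   // assumed to already be scaled into [0, 1]
            weighted += multiplier * imbalance;
            totalMultiplier += multiplier;
        }
        return totalMultiplier == 0.0 ? 0.0 : weighted / totalMultiplier;
    }

    public static void main(String[] args) {
        // Multiplier/imbalance values copied from the functionCost line above
        // ("not needed" entries omitted).
        Map<String, double[]> functions = new LinkedHashMap<>();
        functions.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        functions.put("MoveCostFunction",            new double[] {7.0, 0.0});
        functions.put("RackLocalityCostFunction",    new double[] {15.0, 0.0});
        functions.put("TableSkewCostFunction",       new double[] {35.0, 0.0});
        functions.put("ReadRequestCostFunction",     new double[] {5.0, 0.0});
        functions.put("WriteRequestCostFunction",    new double[] {5.0, 0.0});
        functions.put("MemStoreSizeCostFunction",    new double[] {5.0, 0.0});
        functions.put("StoreFileCostFunction",       new double[] {5.0, 0.0});
        // With every imbalance at 0.0 the combined cost is 0.0, so the balancer
        // has no pressure to move regions for table30.
        System.out.println("overall cost = " + overallCost(functions));
    }
}
```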
2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,464 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
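[Editor's note] The BalancerClusterState records in this run map each server index to a host index, and the records that follow place every server on rack 0 (consistent with the hosts map above, which lists a single rack, rack=0). A minimal sketch of that server-to-host-to-rack indexing, assuming a simple array-based representation rather than the real BalancerClusterState internals:

```java
/** Hypothetical sketch of the server -> host -> rack indexing seen in the log. */
public class ClusterTopologySketch {

    public static void main(String[] args) {
        int numServers = 393;              // srv... entries indexed 0..392 in the hosts map above
        int[] serverToHost = new int[numServers];
        int[] serverToRack = new int[numServers];

        for (int server = 0; server < numServers; server++) {
            serverToHost[server] = server; // each server is its own host in this test
            serverToRack[server] = 0;      // single rack: "racks are {rack=0}"
        }

        // Mirrors the "server N is on host N" / "server N is on rack 0" records.
        System.out.println("server 392 is on host " + serverToHost[392]
            + " and rack " + serverToRack[392]);
    }
}
```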
2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,467 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-11T04:23:31,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,471 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,471 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,474 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,477 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,479 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-11T04:23:31,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,481 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,483 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,486 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,489 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
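The skip decision logged above turns on one threshold: the weighted average imbalance across the enabled cost functions must exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here) before a balance plan is generated; the per-cost-function multipliers and imbalances that feed that average are listed in the functionCost breakdown that follows. The sketch below is only an illustration of that comparison, not HBase's StochasticLoadBalancer code: it assumes the weighted average is formed as sum(multiplier x imbalance) / sum(multiplier), uses the multipliers and imbalances from the functionCost line, and the CostTerm record is a hypothetical helper introduced for the example.

```java
// Minimal sketch (assumed formula, not the actual HBase implementation) of the
// "skipping load balancing because weighted average imbalance <= threshold" decision.
public class MinCostNeedBalanceSketch {

    /** Hypothetical holder for one cost function's multiplier and current imbalance. */
    record CostTerm(String name, double multiplier, double imbalance) {}

    // Assumed weighting: sum(multiplier * imbalance) / sum(multiplier).
    static double weightedAverageImbalance(CostTerm[] terms) {
        double weighted = 0.0;
        double totalMultiplier = 0.0;
        for (CostTerm t : terms) {
            weighted += t.multiplier() * t.imbalance();
            totalMultiplier += t.multiplier();
        }
        return totalMultiplier == 0.0 ? 0.0 : weighted / totalMultiplier;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances taken from the functionCost line in the log.
        CostTerm[] terms = {
            new CostTerm("RegionCountSkewCostFunction", 500.0, 0.0),
            new CostTerm("MoveCostFunction", 7.0, 0.0),
            new CostTerm("RackLocalityCostFunction", 15.0, 0.0),
            new CostTerm("TableSkewCostFunction", 35.0, 0.0),
            new CostTerm("ReadRequestCostFunction", 5.0, 0.0),
            new CostTerm("WriteRequestCostFunction", 5.0, 0.0),
            new CostTerm("MemStoreSizeCostFunction", 5.0, 0.0),
            new CostTerm("StoreFileCostFunction", 5.0, 0.0),
        };

        // Default named in the log message (hbase.master.balancer.stochastic.minCostNeedBalance);
        // lowering it makes the balancer act on smaller imbalances.
        double minCostNeedBalance = 1.0;

        double imbalance = weightedAverageImbalance(terms);
        if (imbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing: imbalance=%.1f <= threshold(%.1f)%n",
                imbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}
```

With every imbalance at 0.0, the weighted average is 0.0, which is why the balancer logs the skip for table31 and moves straight on to the next table.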
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-11T04:23:31,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,491 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,493 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,496 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,499 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-11T04:23:31,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,500 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,503 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,506 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,508 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-11T04:23:31,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,510 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,512 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,513 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,515 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
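
[Editor's note, hedged aside] The BalancerClusterState records in this run map every server index to its own host index while all servers share rack 0, which is what produces the "Number of tables=1, number of hosts=393, number of racks=1" summary a little further below. The following is a minimal, hypothetical Java sketch of that kind of tally; it is an illustration under those assumptions, not HBase's actual BalancerClusterState code.

import java.util.Arrays;
import java.util.stream.IntStream;

// Hypothetical illustration: derive the host/rack summary that the
// BalancerClusterState log lines report. In this test run each server is its
// own host (serverToHost[i] == i) and every server sits on rack 0.
public class ClusterTopologySummarySketch {
  public static void main(String[] args) {
    int numServers = 393;                                         // servers 0..392, as in the log
    int[] serverToHost = IntStream.range(0, numServers).toArray(); // "server i is on host i"
    int[] serverToRack = new int[numServers];                      // all zeros: "server i is on rack 0"

    long numHosts = Arrays.stream(serverToHost).distinct().count();
    long numRacks = Arrays.stream(serverToRack).distinct().count();

    // Matches the summary in the log: number of hosts=393, number of racks=1
    System.out.println("number of hosts=" + numHosts + ", number of racks=" + numRacks);
  }
}
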
2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
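
[Editor's note, hedged aside] The StochasticLoadBalancer message just below reports skipping balancing for table7 because the weighted average imbalance (0.0) does not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0), and it lists the per-cost-function multipliers (RegionCountSkew=500.0, Move=7.0, TableSkew=35.0, and so on). A minimal sketch of how those knobs could be set programmatically is shown here; only hbase.master.balancer.stochastic.minCostNeedBalance is quoted from the log itself, while the other property names are assumed from commonly documented HBase stochastic-balancer settings and should be verified against the HBase version in use. Assumes hbase-common and hadoop-common on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: tune the thresholds/multipliers referenced in the surrounding
// log lines. Property names other than minCostNeedBalance are assumptions.
public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Quoted in the log message: lowering this below 1.0 makes the balancer
    // act on smaller weighted-average imbalances instead of skipping the plan.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Assumed property names mirroring the multipliers the log reports as
    // RegionCountSkewCostFunction=500.0, MoveCostFunction=7.0, TableSkewCostFunction=35.0.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 500f);
    conf.setFloat("hbase.master.balancer.stochastic.moveCost", 7f);
    conf.setFloat("hbase.master.balancer.stochastic.tableSkewCost", 35f);

    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}

In a real deployment these values would normally be set in hbase-site.xml on the master rather than in code; the programmatic form above is used only to keep the sketch self-contained.
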
2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,520 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,522 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,525 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,529 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,532 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,535 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-11T04:23:31,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,540 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
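
The table6 decision logged above ("weighted average imbalance=0.0 <= threshold(1.0)") compares a multiplier-weighted average of the per-function imbalances against hbase.master.balancer.stochastic.minCostNeedBalance. Below is a minimal sketch of that arithmetic using the multipliers and imbalances from the functionCost= line (only the functions not marked "not needed"); the exact computation inside StochasticLoadBalancer may differ in detail:

    // Sketch of the threshold check implied by the balancer's skip message.
    public class ImbalanceCheckSketch {
        public static void main(String[] args) {
            // RegionCountSkew, MoveCost, RackLocality, TableSkew, ReadRequest,
            // WriteRequest, MemStoreSize, StoreFile (from the functionCost= line).
            double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
            double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
            double minCostNeedBalance = 1.0;   // hbase.master.balancer.stochastic.minCostNeedBalance

            double weightedSum = 0.0, multiplierSum = 0.0;
            for (int i = 0; i < multipliers.length; i++) {
                weightedSum += multipliers[i] * imbalances[i];
                multiplierSum += multipliers[i];
            }
            double weightedAverageImbalance = weightedSum / multiplierSum;   // 0.0 here

            if (weightedAverageImbalance <= minCostNeedBalance) {
                System.out.println("skipping load balancing because weighted average imbalance="
                    + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
            }
        }
    }
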
2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,543 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
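
The same skip message also names the two knobs for more aggressive balancing: lower hbase.master.balancer.stochastic.minCostNeedBalance or raise the multiplier of a specific cost function. Here is a minimal sketch of setting both programmatically; in practice these usually go into hbase-site.xml on the master, and the multiplier key shown is the one commonly documented for RegionCountSkewCostFunction, so verify it against your HBase version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // 1.0 in this run; lowering it makes the balancer act on smaller imbalances.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            // 500.0 in the functionCost= line; raising it weights region-count skew more heavily.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
            System.out.println("minCostNeedBalance="
                + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }
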
2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,547 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,550 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-11T04:23:31,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,552 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,554 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,558 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-11T04:23:31,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,563 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,566 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,567 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,569 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,572 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-11T04:23:31,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,574 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
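For reference, the table25 "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)" decision logged above can be reproduced with a small standalone sketch, assuming the check is simply a multiplier-weighted average of the imbalances printed in the functionCost line compared against minCostNeedBalance. This is an illustration of the comparison the message describes, not HBase's actual implementation; the class and method names below are made up for the example.

// A rough, self-contained sketch (NOT HBase code) of the comparison described in
// the table25 "skipping load balancing" message above: recompute the
// multiplier-weighted average of the per-cost-function imbalances from the
// functionCost line and compare it with the minCostNeedBalance threshold (1.0 here).
import java.util.Map;

public class WeightedImbalanceSketch {

  // Weighted average of imbalances, weighted by the cost-function multipliers.
  static double weightedAverageImbalance(Map<String, double[]> costs) {
    double weightedSum = 0.0;
    double weightTotal = 0.0;
    for (double[] multiplierAndImbalance : costs.values()) {
      weightedSum += multiplierAndImbalance[0] * multiplierAndImbalance[1];
      weightTotal += multiplierAndImbalance[0];
    }
    return weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;
  }

  public static void main(String[] args) {
    // {multiplier, imbalance} pairs copied from the table25 functionCost line;
    // the cost functions reported as "(not needed)" are omitted.
    Map<String, double[]> costs = Map.of(
        "RegionCountSkewCostFunction", new double[] {500.0, 0.0},
        "MoveCostFunction", new double[] {7.0, 0.0},
        "RackLocalityCostFunction", new double[] {15.0, 0.0},
        "TableSkewCostFunction", new double[] {35.0, 0.0},
        "ReadRequestCostFunction", new double[] {5.0, 0.0},
        "WriteRequestCostFunction", new double[] {5.0, 0.0},
        "MemStoreSizeCostFunction", new double[] {5.0, 0.0},
        "StoreFileCostFunction", new double[] {5.0, 0.0});

    double minCostNeedBalance = 1.0; // threshold(1.0) reported in the log line
    double avg = weightedAverageImbalance(costs);
    System.out.println("weighted average imbalance = " + avg
        + ", needs balancing = " + (avg > minCostNeedBalance));
    // Prints 0.0 / false, matching the "skipping load balancing" decision above.
  }
}

Under that assumption, every listed imbalance being 0.0 gives a weighted average of 0.0, which does not exceed the 1.0 threshold, hence the skip. As the log message itself suggests, lowering hbase.master.balancer.stochastic.minCostNeedBalance (typically set in the master's configuration, e.g. hbase-site.xml) or raising the multiplier of a specific cost function would make the balancer act on smaller imbalances.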
2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,577 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,578 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,580 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,583 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
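[Editor's note] The INFO record above shows the stochastic balancer skipping table26 because the weighted average imbalance (0.0) does not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0). The Java sketch below only illustrates that kind of threshold check, using the multiplier/imbalance pairs from the functionCost breakdown that follows; the class and method names are hypothetical and this is not the actual StochasticLoadBalancer implementation.

// Illustrative sketch only -- not HBase code. It mimics the decision logged above:
// compute a multiplier-weighted average imbalance across cost functions and skip
// balancing when it does not exceed minCostNeedBalance (assumed default 1.0 here).
import java.util.LinkedHashMap;
import java.util.Map;

public class NeedsBalanceSketch {

    /** Hypothetical weighted-average check, assuming imbalance values in [0, 1]. */
    static boolean needsBalance(Map<String, double[]> costFunctions, double minCostNeedBalance) {
        double weightedSum = 0.0;
        double totalMultiplier = 0.0;
        for (Map.Entry<String, double[]> e : costFunctions.entrySet()) {
            double multiplier = e.getValue()[0];
            double imbalance = e.getValue()[1];
            weightedSum += multiplier * imbalance;
            totalMultiplier += multiplier;
        }
        double weightedAverageImbalance = totalMultiplier == 0.0 ? 0.0 : weightedSum / totalMultiplier;
        System.out.printf("weighted average imbalance=%.1f, threshold=%.1f%n",
            weightedAverageImbalance, minCostNeedBalance);
        return weightedAverageImbalance > minCostNeedBalance;
    }

    public static void main(String[] args) {
        // {multiplier, imbalance} pairs taken from the functionCost line below.
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costs.put("MoveCostFunction", new double[] {7.0, 0.0});
        costs.put("RackLocalityCostFunction", new double[] {15.0, 0.0});
        costs.put("TableSkewCostFunction", new double[] {35.0, 0.0});
        costs.put("ReadRequestCostFunction", new double[] {5.0, 0.0});
        costs.put("WriteRequestCostFunction", new double[] {5.0, 0.0});
        costs.put("MemStoreSizeCostFunction", new double[] {5.0, 0.0});
        costs.put("StoreFileCostFunction", new double[] {5.0, 0.0});

        boolean balance = needsBalance(costs, 1.0);
        System.out.println(balance
            ? "would generate a balance plan"
            : "skipping load balancing (matches the table26 decision above)");
    }
}

Under these assumptions, every imbalance is 0.0, so the weighted average is 0.0 and the check returns false, which matches the "skipping load balancing" decision logged for table26.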
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-11T04:23:31,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,585 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,585 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54
[... identical DEBUG entries repeat for servers 55 through 392, each of the form "balancer.BalancerClusterState(303): server N is on host N", timestamps 2024-11-11T04:23:31,585 through 2024-11-11T04:23:31,589 ...]
2024-11-11T04:23:31,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
[... identical INFO entries repeat for servers 1 through 317, each of the form "balancer.BalancerClusterState(314): server N is on rack 0", timestamps 2024-11-11T04:23:31,589 through 2024-11-11T04:23:31,593 ...]
2024-11-11T04:23:31,593 INFO [Time-limited test {}]
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,594 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
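The INFO line above is the balancer explaining its decision for table27: the weighted average imbalance (0.0), roughly the multiplier-weighted mean of the per-cost-function imbalances in the functionCost breakdown that resumes right after this sketch, did not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0), so no balance plan is generated. What follows is only a minimal, hypothetical sketch of how the two knobs named in that message could be adjusted, assuming the standard Hadoop/HBase Configuration API; the minCostNeedBalance property name is quoted from the log itself, while the regionCountCost key and the chosen values are illustrative assumptions.

    // Hypothetical sketch, not part of this test run: tune the stochastic balancer
    // so it acts on smaller imbalances than the threshold(1.0) reported above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static Configuration moreAggressiveBalancerConf() {
            Configuration conf = HBaseConfiguration.create();
            // Lower the "needs balance" threshold (property name taken from the log message).
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            // Or raise a specific cost function's relative multiplier, e.g. region count
            // skew (multiplier=500.0 in the functionCost line below); key is an assumption.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
            return conf;
        }
    }

In practice these settings would normally live in the master's hbase-site.xml rather than be set in code; the snippet only illustrates which properties the log message is pointing at.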
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,594 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-11T04:23:31,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,596 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,598 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,602 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,605 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
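[Editor's note] The INFO line above names the two tuning knobs this balancer run consulted: hbase.master.balancer.stochastic.minCostNeedBalance (threshold, 1.0 here) and the per-cost-function multipliers. The Java sketch below shows one way to set them programmatically on an HBase Configuration; the same key/value pairs can go in hbase-site.xml. The minCostNeedBalance key comes straight from the log message, but hbase.master.balancer.stochastic.regionCountCost as the multiplier key for RegionCountSkewCostFunction is an assumption inferred from the 500.0 default seen in this run, so verify it against your HBase version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Threshold named in the log message above (1.0 in this run); lowering it
    // makes the stochastic balancer willing to act on smaller imbalances.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Assumed key for the RegionCountSkewCostFunction multiplier (500.0 above);
    // check the exact property name for your HBase release before relying on it.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);

    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}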
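[Editor's note] The "weighted average imbalance" in that message is derived from the per-cost-function multipliers and imbalance values itemized in the functionCost breakdown that follows. The sketch below is illustrative only, not the balancer's actual internals: it combines the (multiplier, imbalance) pairs reported for table28 into a multiplier-weighted average and compares it to the threshold. Because every imbalance is 0.0 in this run, any weighting yields 0.0, which is why the plan for table28 is skipped.

import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    // Multipliers and imbalances as reported for table28 in the functionCost line below.
    Map<String, double[]> costs = new LinkedHashMap<>();
    costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    costs.put("MoveCostFunction",            new double[] {7.0,   0.0});
    costs.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
    costs.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
    costs.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
    costs.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
    costs.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
    costs.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (double[] pair : costs.values()) {
      weightedSum += pair[0] * pair[1];   // multiplier * imbalance
      multiplierSum += pair[0];
    }
    double weightedAverageImbalance = multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;

    double minCostNeedBalance = 1.0;      // threshold reported in the log line above
    boolean needsBalance = weightedAverageImbalance > minCostNeedBalance;
    System.out.printf("weighted average imbalance=%.1f, needsBalance=%b%n",
        weightedAverageImbalance, needsBalance);
  }
}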
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-11T04:23:31,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,607 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
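A minimal, illustrative Java sketch of the decision recorded in the INFO line above for table28 (this is not the HBase balancer code; the class and variable names are hypothetical): each cost function's imbalance is weighted by its multiplier, and balancing is skipped while the weighted average stays at or below hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run). The multiplier and imbalance values are copied from the functionCost line; cost functions reported as "not needed" are omitted.

// Illustrative sketch only, not the HBase implementation. It mirrors the
// decision logged above: combine each cost function's imbalance, weighted by
// its multiplier, and skip balancing when the weighted average stays at or
// below the minCostNeedBalance threshold.
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceCheck {

    /** Weighted average of per-function imbalances; weights are the multipliers. */
    static double weightedAverageImbalance(Map<String, double[]> costFunctions) {
        double weightedSum = 0.0;
        double weightTotal = 0.0;
        for (double[] multiplierAndImbalance : costFunctions.values()) {
            double multiplier = multiplierAndImbalance[0];
            double imbalance = multiplierAndImbalance[1];
            weightedSum += multiplier * imbalance;
            weightTotal += multiplier;
        }
        return weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;
    }

    public static void main(String[] args) {
        // Values taken from the functionCost line above: {multiplier, imbalance}.
        Map<String, double[]> costFunctions = new LinkedHashMap<>();
        costFunctions.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costFunctions.put("MoveCostFunction", new double[] {7.0, 0.0});
        costFunctions.put("RackLocalityCostFunction", new double[] {15.0, 0.0});
        costFunctions.put("TableSkewCostFunction", new double[] {35.0, 0.0});
        costFunctions.put("ReadRequestCostFunction", new double[] {5.0, 0.0});
        costFunctions.put("WriteRequestCostFunction", new double[] {5.0, 0.0});
        costFunctions.put("MemStoreSizeCostFunction", new double[] {5.0, 0.0});
        costFunctions.put("StoreFileCostFunction", new double[] {5.0, 0.0});

        // hbase.master.balancer.stochastic.minCostNeedBalance, as named in the log.
        double minCostNeedBalance = 1.0;
        double imbalance = weightedAverageImbalance(costFunctions);
        if (imbalance <= minCostNeedBalance) {
            System.out.println("skip balancing: weighted average imbalance=" + imbalance
                + " <= threshold(" + minCostNeedBalance + ")");
        } else {
            System.out.println("generate a balance plan, imbalance=" + imbalance);
        }
    }
}

With every imbalance at 0.0, the weighted average is 0.0, which matches why the run above logs a skip; lowering the threshold or raising a multiplier changes only the comparison, as the log message itself suggests.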
2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,609 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
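As a rough sketch of the indexing these DEBUG lines describe (hypothetical names, not the BalancerClusterState implementation): every server id is mapped to a host index and a rack index; in this test each of the 393 servers is its own host and all of them sit in the single rack 0.

// Illustrative sketch only. It builds the kind of server-to-host and
// server-to-rack index arrays the DEBUG lines above enumerate.
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ClusterIndexSketch {

    public static void main(String[] args) {
        int numServers = 393; // servers 0..392, as enumerated above
        List<String> hosts = new ArrayList<>();
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        int[] serverToHost = new int[numServers];
        int[] serverToRack = new int[numServers];

        for (int server = 0; server < numServers; server++) {
            // In this test every server runs on its own host (hypothetical names here).
            String host = "srv" + server;
            Integer idx = hostIndex.get(host);
            if (idx == null) {
                idx = hosts.size();
                hosts.add(host);
                hostIndex.put(host, idx);
            }
            serverToHost[server] = idx;
            // The whole cluster sits in a single rack, index 0.
            serverToRack[server] = 0;
        }

        System.out.println("servers=" + numServers + ", hosts=" + hosts.size() + ", racks=1");
    }
}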
2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,612 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
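The BalancerClusterState lines around this point (and the matching "server N is on host N" lines further down) are the balancer dumping the integer index tables it builds over the cluster before any cost is evaluated: each server name is mapped to a host index and a rack index. The sketch below shows that bookkeeping in miniature; it is a hypothetical stand-alone example, not the actual org.apache.hadoop.hbase.master.balancer.BalancerClusterState code, and the server names in it are made up.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustrative only: the kind of server -> host / server -> rack index arrays
// that the "server N is on host M" / "server N is on rack R" log lines describe.
public class ClusterIndexSketch {
  public static void main(String[] args) {
    List<String> servers = List.of("srv1:16020", "srv2:16020", "srv3:16020"); // hypothetical servers
    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    Map<String, Integer> rackIndex = new LinkedHashMap<>();
    int[] serverToHost = new int[servers.size()];
    int[] serverToRack = new int[servers.size()];
    for (int i = 0; i < servers.size(); i++) {
      String host = servers.get(i).split(":")[0]; // here every server is its own host
      String rack = "/default-rack";              // single rack, as in this test cluster
      serverToHost[i] = hostIndex.computeIfAbsent(host, h -> hostIndex.size());
      serverToRack[i] = rackIndex.computeIfAbsent(rack, r -> rackIndex.size());
      System.out.println("server " + i + " is on host " + serverToHost[i]);
      System.out.println("server " + i + " is on rack " + serverToRack[i]);
    }
    System.out.println("number of hosts=" + hostIndex.size() + ", number of racks=" + rackIndex.size());
  }
}

With one server per host and a single rack, the printed summary mirrors the "number of hosts=393, number of racks=1" line that follows the enumeration in this log.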
2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,615 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-11T04:23:31,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-11T04:23:31,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-11T04:23:31,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-11T04:23:31,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-11T04:23:31,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-11T04:23:31,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-11T04:23:31,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-11T04:23:31,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-11T04:23:31,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,618 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
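The StochasticLoadBalancer message earlier in this run reports that table29 was skipped because the weighted average imbalance (0.0) did not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0), and it suggests lowering that threshold or raising individual cost-function multipliers. The sketch below is only a back-of-the-envelope illustration of that decision, using the multipliers and imbalances printed on the functionCost line; it is not the balancer's actual cost computation, and the 0.05 threshold is just an example value one might set via hbase-site.xml.

// Illustrative only: approximates the "weighted average imbalance vs. minCostNeedBalance"
// decision logged by StochasticLoadBalancer, using the multipliers/imbalances printed above.
public class MinCostNeedBalanceSketch {
  public static void main(String[] args) {
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0}; // from the functionCost line
    double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};
    double weightedSum = 0.0;
    double weightTotal = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weightedSum += multipliers[i] * imbalances[i];
      weightTotal += multipliers[i];
    }
    double weightedAverageImbalance = weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;

    // Example threshold; the default logged here is 1.0. Lowering it (e.g. setting
    // hbase.master.balancer.stochastic.minCostNeedBalance in hbase-site.xml) makes
    // the balancer act on smaller imbalances.
    double minCostNeedBalance = 0.05;
    boolean runBalancer = weightedAverageImbalance > minCostNeedBalance;
    System.out.println("weighted average imbalance=" + weightedAverageImbalance
        + ", threshold=" + minCostNeedBalance + ", balance? " + runBalancer);
  }
}

With every per-function imbalance at 0.0, any positive threshold leads to a skip, which is exactly what the log shows for these tables.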
2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,620 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,624 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
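
The skip decision logged above compares a weighted average of the per-cost-function imbalances (the functionCost breakdown that completes this message follows below) against hbase.master.balancer.stochastic.minCostNeedBalance. The following is only a minimal illustrative sketch of that comparison, assuming the weighted average is sum(multiplier * imbalance) / sum(multiplier); the class and method names are made up for illustration and are not the actual StochasticLoadBalancer internals.

// Illustrative sketch only (hypothetical names, not HBase source): derive a
// weighted average imbalance from (multiplier, imbalance) pairs as logged in
// the functionCost line, then compare it to minCostNeedBalance.
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceSketch {

    /** Weighted average of per-function imbalances, using multipliers as weights. */
    static double weightedAverageImbalance(Map<String, double[]> costs) {
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] pair : costs.values()) {
            double multiplier = pair[0];
            double imbalance = pair[1];
            weightedSum += multiplier * imbalance;
            multiplierSum += multiplier;
        }
        return multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
    }

    public static void main(String[] args) {
        // (multiplier, imbalance) values taken from the functionCost line logged for table20.
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costs.put("MoveCostFunction",            new double[] {7.0,   0.0});
        costs.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        costs.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        costs.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        costs.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        costs.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        costs.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
        double imbalance = weightedAverageImbalance(costs);

        // Mirrors the logged decision: 0.0 <= 1.0, so balancing is skipped for this table.
        if (imbalance <= minCostNeedBalance) {
            System.out.printf("skip balancing: imbalance=%.1f <= threshold(%.1f)%n",
                imbalance, minCostNeedBalance);
        } else {
            System.out.println("generate a balance plan");
        }
    }
}

Under this assumption, every per-function imbalance being 0.0 gives a weighted average of 0.0, which is <= the 1.0 threshold, matching the logged choice to skip balancing for table20; lowering minCostNeedBalance or raising a function's multiplier (as the log message suggests) would change that comparison.
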
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-11T04:23:31,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,629 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,631 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,635 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
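The INFO record above shows the StochasticLoadBalancer skipping table21 because the weighted average imbalance (0.0) is at or below the minCostNeedBalance threshold (1.0); the functionCost breakdown for that same record continues directly below this sketch. As a minimal illustrative sketch only, assuming nothing beyond hbase-common being on the classpath and using the property name quoted verbatim in the log message, lowering the threshold programmatically could look like this (the value 0.05f is an arbitrary example, not a recommendation taken from this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdSketch {
  public static void main(String[] args) {
    // Build an HBase configuration (picks up hbase-site.xml if present on the classpath).
    Configuration conf = HBaseConfiguration.create();

    // The log reports: weighted average imbalance=0.0 <= threshold(1.0), so the
    // balancer skips table21. Lowering this threshold makes the balancer act on
    // smaller imbalances, as the log message itself suggests.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Read the effective value back out (the second argument is only the fallback default).
    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}

The same property can instead be set in hbase-site.xml on the master. The other knob the log message mentions is raising individual cost-function multipliers (for example the 500.0 multiplier reported for RegionCountSkewCostFunction below); the exact per-function property keys should be checked against the HBase reference guide rather than inferred from this log.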
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-11T04:23:31,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,640 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,642 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,646 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
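The INFO message just above (together with the functionCost breakdown that follows) is the balancer's skip decision for table22: every cost function reports imbalance 0.0, so the multiplier-weighted average cannot clear the 1.0 threshold. A minimal, self-contained sketch of that decision, assuming the "weighted average imbalance" is simply sum(multiplier x imbalance) / sum(multiplier); the multipliers and imbalances are copied from the log, and the only property name used is the one the message itself prints:

// Sketch only: reproduces the skip decision above under the assumption that
// the weighted average imbalance is the multiplier-weighted mean of the
// per-cost-function imbalances. Not the actual StochasticLoadBalancer code.
public class NeedsBalanceSketch {
  public static void main(String[] args) {
    // (multiplier, imbalance) pairs as printed for table22 in the log.
    double[][] costs = {
      {500.0, 0.0}, // RegionCountSkewCostFunction
      {7.0, 0.0},   // MoveCostFunction
      {15.0, 0.0},  // RackLocalityCostFunction
      {35.0, 0.0},  // TableSkewCostFunction
      {5.0, 0.0},   // ReadRequestCostFunction
      {5.0, 0.0},   // WriteRequestCostFunction
      {5.0, 0.0},   // MemStoreSizeCostFunction
      {5.0, 0.0},   // StoreFileCostFunction
    };
    // Threshold named in the message: hbase.master.balancer.stochastic.minCostNeedBalance
    double minCostNeedBalance = 1.0;
    double weighted = 0.0, totalMultiplier = 0.0;
    for (double[] c : costs) {
      weighted += c[0] * c[1];
      totalMultiplier += c[0];
    }
    double weightedAverageImbalance = totalMultiplier == 0 ? 0.0 : weighted / totalMultiplier;
    if (weightedAverageImbalance <= minCostNeedBalance) {
      System.out.println("skipping load balancing: " + weightedAverageImbalance
          + " <= " + minCostNeedBalance);
    }
  }
}

Lowering the threshold property named in the message, or raising one of the multipliers, is what would shift this comparison toward actually generating a plan.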
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-11T04:23:31,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1515351537=97, srv380239331=271, srv1587732834=114, srv350502508=268, srv1867335617=177, srv2103700198=227, srv950592304=382, srv170245254=132, srv547292329=297, srv287764635=254, srv590818994=308, srv1856997664=175, srv170519133=134, srv605720118=312, srv1082952421=19, srv1409402443=69, srv274667166=253, srv259125592=250, srv415588724=280, srv561715684=302, srv1022200451=6, srv150186743=89, srv1442688387=72, srv864907081=364, srv777555506=350, srv325361322=262, srv850540246=360, srv1238360784=46, srv2135096259=235, srv1732158942=142, srv444722655=285, srv616784721=314, srv1077138174=16, srv760092859=344, srv34864275=267, srv649353820=324, srv815349753=357, srv1712595442=135, srv594778363=309, srv729566538=339, srv1930139479=189, srv990295792=392, srv961909176=385, srv1003567750=1, srv161886500=120, srv666528945=329, srv174062503=145, srv771927526=348, srv1753506357=153, srv1109888810=25, srv1172501768=35, srv628106095=316, srv883789549=370, srv771810329=347, srv1263222045=50, srv2050970783=218, srv1156341986=31, srv56091121=301, srv32698319=263, srv1377066088=66, srv93052214=378, srv1503790556=90, srv1839856581=171, srv1792179019=160, srv119708729=40, srv1760389232=154, srv1471834077=79, srv2042732697=216, srv1325931166=58, srv554590077=300, srv1088823845=20, srv1682778121=127, srv1275033463=53, srv1876395650=180, srv1568870626=109, srv871473711=366, srv1703168710=133, srv2142440649=238, srv1895936991=183, srv1927964062=188, srv1837720204=170, srv1095529737=22, srv1074665432=15, srv1217338301=43, srv1514331067=95, srv1752653613=152, srv1760604450=155, srv637125556=318, srv811349489=356, srv1727224440=141, srv1667245165=125, srv875648266=367, srv1968862954=197, srv562594655=303, srv1264562524=51, srv1049272459=11, srv1985425479=199, srv1166478839=34, srv906103104=374, srv1856589801=174, srv1614543030=119, srv6679715=330, srv2027018661=211, srv1340704875=60, srv1970749470=198, srv2057313313=220, srv891268572=371, srv911368117=376, srv1011859460=3, srv396832613=274, srv1744218384=149, srv204071982=214, srv896698353=372, srv478161689=288, srv493783263=291, srv806181798=355, srv1178698520=38, srv383171447=272, srv1461897610=77, srv1389715904=67, srv2000023486=204, srv1394291425=68, srv250591484=246, srv263730073=252, srv1450360563=73, srv1581440690=112, srv1741479847=147, srv1878876221=181, srv172043416=138, srv968283272=386, srv1148354474=28, srv640128425=320, srv686226980=333, srv1480615532=82, srv789676598=353, srv954530934=383, srv162137301=121, srv926100070=377, srv1508546374=93, srv1065686101=13, srv1216527252=42, srv1772527239=157, srv1678527965=126, srv2020627670=209, srv1068268526=14, srv596803434=310, srv1450675557=74, srv935998840=380, 
srv189734008=185, srv740146826=342, srv1345401733=62, srv876878660=368, srv547979270=298, srv174142577=146, srv1543632457=103, srv55123874=299, srv646284150=323, srv1358193794=63, srv2035645987=213, srv1579644071=111, srv685141563=332, srv407689516=278, srv1844264258=172, srv1797336607=163, srv1635485774=123, srv1896763279=184, srv241150071=244, srv1587763404=115, srv974509484=387, srv2090957561=224, srv1555654637=107, srv1514380594=96, srv1800710216=164, srv1606757111=117, srv527289635=295, srv564725203=304, srv1966684704=196, srv115321889=29, srv10507895=12, srv1946902961=192, srv1090916111=21, srv296060629=257, srv910234461=375, srv877496707=369, srv835635736=359, srv41840911=281, srv1319900058=57, srv441236078=284, srv1108644305=24, srv2033936681=212, srv356442333=269, srv1482157540=84, srv2011509364=207, srv1993570384=203, srv1796028185=162, srv1765588927=156, srv406713326=277, srv15224782=100, srv1164866628=33, srv510170206=292, srv1029020232=8, srv715363981=335, srv1988115951=200, srv1614298547=118, srv211573175=230, srv1684898639=128, srv1037085099=10, srv2137988929=236, srv1365438416=64, srv1372714187=65, srv223592388=241, srv1327718395=59, srv1177573471=37, srv1732597865=143, srv588096557=307, srv1026456991=7, srv1463161913=78, srv523332847=294, srv2014142096=208, srv2112282719=228, srv1738094059=144, srv723807781=337, srv2061723046=222, srv1514223004=94, srv302950095=258, srv1815572490=167, srv421715046=282, srv2042369558=215, srv724544053=338, srv230669562=243, srv1564566667=108, srv1862326439=176, srv640014381=319, srv251999143=247, srv645001152=322, srv168710942=129, srv935331723=379, srv101483699=4, srv2115085149=229, srv583047279=306, srv1176169557=36, srv1715548729=137, srv634057257=317, srv1586556479=113, srv2133048423=234, srv657516532=326, srv735938328=341, srv295055440=256, srv603898242=311, srv1499081397=88, srv659454230=327, srv2055659633=219, srv261504992=251, srv1217523007=44, srv517673346=293, srv2131435902=233, srv851127783=362, srv2081232840=223, srv544546668=296, srv981951651=388, srv1481763814=83, srv1822301190=168, srv308622041=260, srv1417600462=71, srv1549847234=105, srv1836678892=169, srv627624063=315, srv291220164=255, srv2119713721=231, srv187279012=179, srv1902055196=186, srv148606580=86, srv763019739=345, srv1630044736=122, srv221708484=240, srv660250515=328, srv684877094=331, srv644438559=321, srv1258862135=49, srv458074092=286, srv1714756580=136, srv1941078874=190, srv427085677=283, srv345786498=266, srv1554775046=106, srv1890439745=182, srv258886596=249, srv2003241893=205, srv2047483672=217, srv305204754=259, srv1485460002=85, srv1545090607=104, srv986518976=391, srv1495398173=87, srv1854706935=173, srv2122306346=232, srv722294800=336, srv1003668183=2, srv729853031=340, srv114581728=26, srv707187316=334, srv1078596780=17, srv1099276862=23, srv38714363=273, srv1222035982=45, srv788708514=352, srv401244697=275, srv2100639833=226, srv829714917=358, srv1003080932=0, srv2025532194=210, srv1942309321=191, srv487139025=290, srv482174232=289, srv257902134=248, srv1319873936=56, srv649923863=325, srv1793414328=161, srv1687258472=130, srv1519788162=99, srv60582926=313, srv961059078=384, srv945346785=381, srv767174102=346, srv1600615232=116, srv1778610087=158, srv1991182117=201, srv1341660614=61, srv1748277175=151, srv215793449=239, srv147432002=80, srv758061954=343, srv172453482=140, srv1912538541=187, srv1080911289=18, srv2141996492=237, srv1266233855=52, srv1145862424=27, srv1201739847=41, srv1956073202=194, srv1416097901=70, srv1526911577=102, 
srv2092965777=225, srv318071653=261, srv1991997226=202, srv1156295080=30, srv1190178860=39, srv1246842539=48, srv1950786568=193, srv897546620=373, srv163613908=124, srv335266897=264, srv1812661265=166, srv150780087=92, srv1723390546=139, srv246241887=245, srv571195621=305, srv1570671625=110, srv10370211=9, srv1517702157=98, srv405661787=276, srv1455562100=76, srv1164514800=32, srv797759372=354, srv859490345=363, srv1014935397=5, srv1300642161=55, srv774591685=349, srv472123562=287, srv2010584105=206, srv2060889652=221, srv1741831347=148, srv851008315=361, srv1809319807=165, srv195950534=195, srv1788411356=159, srv984708097=390, srv1244888763=47, srv168796190=131, srv34140062=265, srv784978809=351, srv152494705=101, srv1870735416=178, srv228742479=242, srv1478893378=81, srv87131690=365, srv983990340=389, srv1507659632=91, srv1280326601=54, srv361981140=270, srv411672877=279, srv145247488=75, srv1744922489=150} racks are {rack=0} 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:23:31,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:23:31,651 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:23:31,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
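The DEBUG lines in this run enumerate how BalancerClusterState flattens the cluster into integer indices: each srvXXX name from the Hosts map gets a server index, each distinct host a host index, and each rack a rack index (here a single rack 0, and every server on its own host). A minimal sketch of that bookkeeping, assuming plain string host/rack names rather than HBase's ServerName and rack-manager types; this is not the actual BalancerClusterState implementation:

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative index-flattening only: prints lines shaped like the DEBUG
// output above for a couple of server names taken from the log.
public class ClusterIndexSketch {
  public static void main(String[] args) {
    // Hypothetical topology: each server is its own host, one shared rack,
    // matching what this test logs.
    Map<String, String[]> serverToHostRack = new LinkedHashMap<>();
    serverToHostRack.put("srv1515351537", new String[] {"srv1515351537", "rack"});
    serverToHostRack.put("srv380239331",  new String[] {"srv380239331",  "rack"});

    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    Map<String, Integer> rackIndex = new LinkedHashMap<>();
    int server = 0;
    for (Map.Entry<String, String[]> e : serverToHostRack.entrySet()) {
      int h = hostIndex.computeIfAbsent(e.getValue()[0], k -> hostIndex.size());
      int r = rackIndex.computeIfAbsent(e.getValue()[1], k -> rackIndex.size());
      System.out.println("server " + server + " is on host " + h);
      System.out.println("server " + server + " is on rack " + r);
      server++;
    }
  }
}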
2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:23:31,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:23:31,653 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:23:31,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:23:31,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:23:31,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:23:31,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:23:31,656 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:23:31,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:23:31,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:23:31,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
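The enumeration above (and continuing below) is BalancerClusterState assigning each server a dense host index and rack index; because every server reports the same rack, they all map to rack 0, which is why the summary further down ends with "number of racks=1". A minimal sketch of that kind of index assignment, using hypothetical names (ClusterIndexer, hostOf, rackOf are illustrative, not HBase identifiers):

```java
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Minimal sketch of dense host/rack index assignment, mirroring the
// "server N is on host H" / "server N is on rack R" bookkeeping in this log.
// Class and method names are illustrative, not taken from HBase.
public class ClusterIndexer {
    private final Map<String, Integer> hostIndex = new LinkedHashMap<>();
    private final Map<String, Integer> rackIndex = new LinkedHashMap<>();

    int hostOf(String hostname) {
        // First host seen gets index 0, the next 1, and so on.
        Integer i = hostIndex.get(hostname);
        if (i == null) {
            i = hostIndex.size();
            hostIndex.put(hostname, i);
        }
        return i;
    }

    int rackOf(String rack) {
        // A single shared rack name means every server resolves to rack 0.
        Integer i = rackIndex.get(rack);
        if (i == null) {
            i = rackIndex.size();
            rackIndex.put(rack, i);
        }
        return i;
    }

    public static void main(String[] args) {
        ClusterIndexer idx = new ClusterIndexer();
        List<String> servers = List.of("srv1674645609", "srv792227937");
        for (int i = 0; i < servers.size(); i++) {
            System.out.println("server " + i + " is on host " + idx.hostOf(servers.get(i))
                + " and rack " + idx.rackOf("rack"));
        }
    }
}
```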
2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:23:31,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:23:31,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:23:31,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:23:31,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:23:31,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:23:31,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-11T04:23:31,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:23:31,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:23:31,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:23:31,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:23:31,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:23:31,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-11T04:23:31,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=-1.0 2024-11-11T04:23:31,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 577.0 etc. 2024-11-11T04:23:31,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:31,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
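The "skipping load balancing because weighted average imbalance=... <= threshold(1.0)" decisions above are driven by the per-cost-function imbalances and multipliers listed in the same entry: the config line reports the sum of multipliers as 577.0, and the threshold comes from hbase.master.balancer.stochastic.minCostNeedBalance. A minimal sketch of that weighting, assuming the weighted average is simply sum(multiplier × imbalance) / sum(multipliers), which is consistent with the logged values (for example 500 × 1.0 / 577 ≈ 0.8665511265164645 in the run just below):

```java
// Sketch of the weighted-average imbalance check described in the log.
// Assumes weightedAverage = sum(multiplier_i * imbalance_i) / sum(multiplier_i);
// the logged numbers are consistent with this (e.g. 500 * 1.0 / 577 ≈ 0.86655).
public class ImbalanceCheck {
    record CostFunction(String name, double multiplier, double imbalance) {}

    static double weightedAverageImbalance(CostFunction[] costs) {
        double weighted = 0.0, total = 0.0;
        for (CostFunction c : costs) {
            weighted += c.multiplier() * c.imbalance();
            total += c.multiplier();
        }
        return total == 0.0 ? 0.0 : weighted / total;
    }

    public static void main(String[] args) {
        // Multipliers as listed in the log; the "(not needed)" functions are omitted,
        // which makes the multipliers sum to exactly 577.0 as reported.
        CostFunction[] costs = {
            new CostFunction("RegionCountSkewCostFunction", 500.0, 1.0),
            new CostFunction("MoveCostFunction", 7.0, 0.0),
            new CostFunction("RackLocalityCostFunction", 15.0, 0.0),
            new CostFunction("TableSkewCostFunction", 35.0, 0.0),
            new CostFunction("ReadRequestCostFunction", 5.0, 0.0),
            new CostFunction("WriteRequestCostFunction", 5.0, 0.0),
            new CostFunction("MemStoreSizeCostFunction", 5.0, 0.0),
            new CostFunction("StoreFileCostFunction", 5.0, 0.0),
        };
        double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
        double avg = weightedAverageImbalance(costs);
        System.out.printf("weighted average imbalance=%.16f%n", avg); // ≈ 0.8665511265164645
        System.out.println(avg <= minCostNeedBalance
            ? "skipping load balancing (below threshold)"
            : "running balancer");
    }
}
```

Note also that slop=-1.0 in the entry above disables the simpler "sloppiness" check ("Slop is less than zero, not checking for sloppiness"), so in this trace the decision rests entirely on the weighted cost comparison, unless the idle-server shortcut discussed further down fires.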
2024-11-11T04:23:31,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:31,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:31,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:31,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1674645609=0, srv792227937=1} racks are {rack=0} 2024-11-11T04:23:31,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=2, number of racks=1 2024-11-11T04:23:31,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=3200 2024-11-11T04:23:31,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 36 ms to try 3200 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.006065857885615251. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:31,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2039267072=1, srv1279617097=0} racks are {rack=0} 2024-11-11T04:23:31,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=2, number of racks=1 2024-11-11T04:23:31,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
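The config entry earlier reports maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800 and maxRunningTime=30000, and each balancing run then logs a computedMaxSteps (3200 for the run above, later 7200, 9600, 19200, 96000, 12800). Those values are consistent with computedMaxSteps = stepsPerRegion × regions × servers, capped at maxSteps; treat that relation as an inference from the logged numbers rather than a quote of the implementation. A minimal sketch:

```java
// Sketch of how a step budget like the logged computedMaxSteps could be derived.
// The relation (stepsPerRegion * regions * servers, capped at maxSteps) is an
// inference from the logged values (e.g. 800 * 2 * 2 = 3200), not HBase source.
public class StepBudget {
    static long computedMaxSteps(long stepsPerRegion, long regions, long servers,
                                 long maxSteps, boolean runMaxSteps) {
        if (runMaxSteps) {
            return maxSteps; // always exhaust the configured ceiling
        }
        return Math.min(maxSteps, stepsPerRegion * regions * servers);
    }

    public static void main(String[] args) {
        // Consistent with the first balancing run above: 2 regions on 2 servers.
        System.out.println(computedMaxSteps(800, 2, 2, 1_000_000, false)); // 3200
        // Consistent with the later run that tried 96000 iterations (e.g. 40 regions on 3 servers).
        System.out.println(computedMaxSteps(800, 40, 3, 1_000_000, false)); // 96000
    }
}
```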
2024-11-11T04:23:31,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2110519005=1, srv1236022028=0} racks are {rack=0} 2024-11-11T04:23:31,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=2, number of racks=1 2024-11-11T04:23:31,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:31,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1884055103=0, srv527713550=1} racks are {rack=0} 2024-11-11T04:23:31,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=2, number of racks=1 2024-11-11T04:23:31,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:31,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv239072824=0, srv823059063=1} racks are {rack=0} 2024-11-11T04:23:31,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=2, number of racks=1 2024-11-11T04:23:31,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.2888503755054882 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.33333333333333337); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-11T04:23:31,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1909226289=0, srv483848638=1} racks are {rack=0} 2024-11-11T04:23:31,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:31,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:31,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1762602492=0, srv98769428=1} racks are {rack=0} 2024-11-11T04:23:31,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-11T04:23:31,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:31,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv422146514=0, srv993352590=1} racks are {rack=0} 2024-11-11T04:23:31,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=10, number of hosts=2, number of racks=1 2024-11-11T04:23:31,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.6932409012131716 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.8); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
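As a spot-check of the weighting sketched earlier, the weighted averages logged in this stretch line up with sum(multiplier × imbalance) / 577.0: the 4-table case gives 500 × 0.33333333333333337 / 577 ≈ 0.2888503755054882, the 10-table case gives 500 × 0.8 / 577 ≈ 0.6932409012131716, and after a plan has been applied only MoveCostFunction is non-zero, e.g. 7 × 0.5 / 577 ≈ 0.006065857885615251. A small verification, with the expected values taken from the log:

```java
// Spot-check: recompute three weighted averages that appear in the log,
// using sum(multiplier * imbalance) / 577.0 (577.0 is the logged multiplier sum).
public class SpotCheck {
    public static void main(String[] args) {
        double sumOfMultipliers = 577.0;
        System.out.println(500.0 * 0.33333333333333337 / sumOfMultipliers); // ≈ 0.2888503755054882 (as logged)
        System.out.println(500.0 * 0.8 / sumOfMultipliers);                 // ≈ 0.6932409012131716 (as logged)
        System.out.println(7.0 * 0.5 / sumOfMultipliers);                   // ≈ 0.006065857885615251 (as logged)
    }
}
```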
2024-11-11T04:23:31,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1688690052=0, srv988160271=1} racks are {rack=0} 2024-11-11T04:23:31,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1432, number of hosts=2, number of racks=1 2024-11-11T04:23:31,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.40878413881917497 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4717368961973279); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:31,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1994545958=0, srv872389322=1} racks are {rack=0} 2024-11-11T04:23:31,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=53, number of hosts=2, number of racks=1 2024-11-11T04:23:31,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.034662045060658585 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.04000000000000001); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:31,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1075798659=0, srv258709954=1, srv258804799=2} racks are {rack=0} 2024-11-11T04:23:31,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=3, number of racks=1 2024-11-11T04:23:31,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896258); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.5003035261608543, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896258); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=7200 2024-11-11T04:23:31,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. 
Computation took 57 ms to try 7200 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.5003035261608543 to a new imbalance of 0.004043905257076833. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.3333333333333333); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:31,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1847313443=0, srv211845601=2, srv1876489447=1} racks are {rack=0} 2024-11-11T04:23:31,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=3, number of racks=1 2024-11-11T04:23:31,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:31,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.25015176308042714 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.2886751345948129); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
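Several runs above and below start with "Running balancer because cluster has idle server(s)" even though the weighted imbalance is well under the 1.0 threshold: an idle server forces a balancing pass regardless of the cost check. The log does not spell out the criterion; a minimal sketch assuming "idle" means a server carrying zero regions while others carry at least one:

```java
import java.util.Map;

// Sketch of an "idle server" guard: if any server holds zero regions while the
// cluster has regions to place, balance regardless of the imbalance threshold.
// The zero-region criterion is an assumption consistent with the runs above
// that report idle server(s); it is not quoted from HBase.
public class IdleServerCheck {
    static boolean hasIdleServer(Map<String, Integer> regionsPerServer) {
        boolean anyLoaded = regionsPerServer.values().stream().anyMatch(n -> n > 0);
        boolean anyIdle = regionsPerServer.values().stream().anyMatch(n -> n == 0);
        return anyLoaded && anyIdle;
    }

    public static void main(String[] args) {
        // Mirrors the small runs above where one server holds all regions.
        System.out.println(hasIdleServer(Map.of("srvA", 2, "srvB", 0))); // true  -> run balancer
        System.out.println(hasIdleServer(Map.of("srvA", 1, "srvB", 1))); // false -> apply threshold
    }
}
```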
2024-11-11T04:23:31,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1908109034=2, srv1732080100=0, srv1762290251=1} racks are {rack=0} 2024-11-11T04:23:31,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=3, number of racks=1 2024-11-11T04:23:31,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.33333333333333337); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.2888503755054882, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.33333333333333337); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=9600 2024-11-11T04:23:31,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 57 ms to try 9600 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.2888503755054882 to a new imbalance of 0.0030329289428076256. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.25); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:31,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2131898231=2, srv1376066518=1, srv1044525264=0} racks are {rack=0} 2024-11-11T04:23:31,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=3, number of racks=1 2024-11-11T04:23:31,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=7200 2024-11-11T04:23:31,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 18 ms to try 7200 different iterations. 
Found a solution that moves 2 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.008087810514153667. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.6666666666666666); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:31,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1136145078=0, srv1543878095=1, srv16150439=2} racks are {rack=0} 2024-11-11T04:23:31,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=3, number of racks=1 2024-11-11T04:23:31,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=9600 2024-11-11T04:23:31,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 24 ms to try 9600 different iterations. Found a solution that moves 2 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.006065857885615251. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
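Across the "Finished computing new moving plan" entries, the post-plan MoveCostFunction imbalance tracks the fraction of regions the plan moves: 1 of 2 regions gives 0.5, 1 of 3 gives 0.333…, 1 of 4 gives 0.25, 2 of 3 gives 0.666…, and later 13 of 40 gives 0.325, 1 of 6 gives 0.166…, 3 of 4 gives 0.75 (region totals inferred from the step budgets). Treat the exact definition as an observation about this trace; a minimal sketch:

```java
// Sketch: the post-plan MoveCostFunction imbalance in this log is consistent
// with movedRegions / totalRegions. Region totals are inferred from the step
// budgets (computedMaxSteps / stepsPerRegion / servers), so this is an
// observation about the trace, not a statement of HBase's cost definition.
public class MoveCostObservation {
    static double moveImbalance(int movedRegions, int totalRegions) {
        return totalRegions == 0 ? 0.0 : (double) movedRegions / totalRegions;
    }

    public static void main(String[] args) {
        System.out.println(moveImbalance(1, 2));   // ≈ 0.5   (first plan in this trace)
        System.out.println(moveImbalance(2, 3));   // ≈ 0.6666666666666666
        System.out.println(moveImbalance(13, 40)); // ≈ 0.325 (the 96000-step run)
    }
}
```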
2024-11-11T04:23:31,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1771219190=1, srv636061749=2, srv1475348685=0} racks are {rack=0} 2024-11-11T04:23:31,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:31,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:31,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:31,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:31,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:31,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:31,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=20, number of hosts=3, number of racks=1 2024-11-11T04:23:31,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4871794871794873); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:31,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.42216593343109815, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4871794871794873); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=96000 2024-11-11T04:23:32,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 221 ms to try 96000 different iterations. Found a solution that moves 13 regions; Going from a computed imbalance of 0.42216593343109815 to a new imbalance of 0.003942807625649913. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.325); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:32,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv224603845=2, srv1816652999=1, srv1367259004=0, srv24556100=3} racks are {rack=0} 2024-11-11T04:23:32,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:32,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:32,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:32,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:32,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:32,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:32,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:32,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:32,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=4, number of racks=1 2024-11-11T04:23:32,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.29457175359290033); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.25526148491585815, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.29457175359290033); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=19200 2024-11-11T04:23:32,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 27 ms to try 19200 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.25526148491585815 to a new imbalance of 0.0020219526285384167. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.16666666666666666); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
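The earlier config line reports maxRunningTime=30000 alongside runMaxSteps=false, and the "Computation took N ms to try M different iterations" entries show every run here finishing far inside that wall-clock budget (221 ms for 96000 iterations, 27 ms for 19200, 19 ms for 12800). In this trace the searches therefore stop because they exhaust computedMaxSteps, not because they hit the 30-second limit. A minimal sketch of that dual stopping condition, with the candidate-evaluation step left abstract:

```java
// Sketch of a search loop bounded by both a step budget and a wall-clock budget,
// matching the logged configuration (computedMaxSteps plus maxRunningTime=30000 ms).
// The perturb/accept step is abstracted away; this only shows the stopping logic.
public class BoundedSearch {
    interface Step { void tryOnePerturbation(); } // stand-in for evaluating one candidate move

    static long run(long computedMaxSteps, long maxRunningTimeMs, Step step) {
        long start = System.currentTimeMillis();
        long steps = 0;
        while (steps < computedMaxSteps
            && System.currentTimeMillis() - start < maxRunningTimeMs) {
            step.tryOnePerturbation();
            steps++;
        }
        return steps; // iterations actually tried, as reported in the log
    }

    public static void main(String[] args) {
        long tried = run(19_200, 30_000, () -> { /* evaluate a candidate move */ });
        System.out.println("tried " + tried + " iterations");
    }
}
```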
2024-11-11T04:23:32,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1290977308=0, srv364108230=2, srv493992031=3, srv1882347623=1} racks are {rack=0} 2024-11-11T04:23:32,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:32,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:32,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:32,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:32,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:32,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:32,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:32,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:32,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=4, number of racks=1 2024-11-11T04:23:32,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=12800 2024-11-11T04:23:32,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 19 ms to try 12800 different iterations. Found a solution that moves 3 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.009098786828422877. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.75); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:32,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1376913108=0, srv910125149=3, srv652807109=2, srv1575763558=1} racks are {rack=0} 2024-11-11T04:23:32,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:32,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:32,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:32,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:32,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:32,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:32,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:32,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:32,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=5, number of hosts=4, number of racks=1 2024-11-11T04:23:32,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=16000 2024-11-11T04:23:32,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 24 ms to try 16000 different iterations. Found a solution that moves 3 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.007279029462738302. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.6); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
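The computedMaxSteps figures in these runs (19200, 12800, and 16000 above) are consistent with regions x stepsPerRegion x servers, assuming the default stepsPerRegion of 800 (hbase.master.balancer.stochastic.stepsPerRegion) and a cap at hbase.master.balancer.stochastic.maxSteps; the region counts used below (6, 4, 5) are inferred from the logged step totals rather than stated in the log. A hedged arithmetic sketch; the helper is illustrative, not HBase API.

public class MaxStepsCheck {
    // Assumed relation, consistent with the logged values:
    // computedMaxSteps = min(maxSteps, regions * stepsPerRegion * servers)
    static long computedMaxSteps(long regions, long servers) {
        final long stepsPerRegion = 800L;  // assumed default of hbase.master.balancer.stochastic.stepsPerRegion
        final long maxSteps = 1_000_000L;  // cap quoted in the WARN entries later in this log
        return Math.min(maxSteps, regions * stepsPerRegion * servers);
    }

    public static void main(String[] args) {
        System.out.println(computedMaxSteps(6, 4)); // 19200, as in the 04:23:32,151 run
        System.out.println(computedMaxSteps(4, 4)); // 12800, as in the 04:23:32,178 run
        System.out.println(computedMaxSteps(5, 4)); // 16000, as in the run directly above
    }
}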
2024-11-11T04:23:32,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv527996066=2, srv1817524549=0, srv998209453=3, srv1912927311=1} racks are {rack=0} 2024-11-11T04:23:32,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:32,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:32,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:32,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:32,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:32,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:32,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:32,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:32,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-11T04:23:32,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896257); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.5003035261608542, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896257); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=38400 2024-11-11T04:23:32,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 55 ms to try 38400 different iterations. Found a solution that moves 6 regions; Going from a computed imbalance of 0.5003035261608542 to a new imbalance of 0.006065857885615251. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:32,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv915409474=2, srv1104295353=0, srv19534178=1, srv960958155=3} racks are {rack=0} 2024-11-11T04:23:32,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:32,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:32,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:32,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:32,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:32,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:32,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:32,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:32,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-11T04:23:32,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7071067811865475); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.6127441778046339, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7071067811865475); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=25600 2024-11-11T04:23:32,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 36 ms to try 25600 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.6127441778046339 to a new imbalance of 0.006065857885615251. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-11T04:23:32,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv983256394=2, srv1393807318=0, srv669911525=1, srv985528239=3} racks are {rack=0} 2024-11-11T04:23:32,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:32,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:32,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:32,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:32,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:32,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:32,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:32,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:32,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-11T04:23:32,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7907604410896715); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.6852343510309111, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7907604410896715); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=22400 2024-11-11T04:23:32,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 34 ms to try 22400 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.6852343510309111 to a new imbalance of 0.006932409012131715. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5714285714285714); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:32,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv754281497=3, srv638088642=2, srv1362751207=0, srv1650613800=1} racks are {rack=0} 2024-11-11T04:23:32,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:32,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:32,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:32,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:32,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:32,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:32,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:32,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:32,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-11T04:23:32,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=19200 2024-11-11T04:23:32,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 30 ms to try 19200 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.008087810514153667. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.6666666666666666); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-11T04:23:32,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1934004945=1, srv740987699=3, srv1610446922=0, srv350206128=2} racks are {rack=0} 2024-11-11T04:23:32,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:32,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:32,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:32,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:32,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:32,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:32,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:32,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:32,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=7, number of hosts=4, number of racks=1 2024-11-11T04:23:32,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:32,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0962834585018294 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.11111111111111113); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-11T04:23:32,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv709433363=2, srv872268081=3, srv1218040961=0, srv1724982098=1} racks are {rack=0} 2024-11-11T04:23:32,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:32,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:32,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:32,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:32,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:32,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:32,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:32,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:32,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=8, number of hosts=4, number of racks=1 2024-11-11T04:23:32,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:32,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.17331022530329285 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.19999999999999996); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
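The two "skipping load balancing" entries above show the path taken when no idle server forces a run: the weighted average imbalance (0.0962... and 0.1733... here) is compared against hbase.master.balancer.stochastic.minCostNeedBalance, logged as threshold(1.0), and the balancer skips when the threshold is not exceeded. The log's own suggestion is to lower that threshold or raise the relevant cost multipliers. A hedged sketch of the first option using the Hadoop Configuration API; the value 0.05 is an example, not a recommendation, and nothing in this log says whether this particular key is picked up without a master restart.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MinCostNeedBalanceSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower the "need balance" threshold so smaller weighted imbalances
        // (such as the 0.17 seen above) still trigger a balance plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}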
2024-11-11T04:23:32,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1284346334=0, srv619236049=3, srv1752806734=1, srv2117536344=2} racks are {rack=0} 2024-11-11T04:23:32,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:32,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:32,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:32,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:32,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:32,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:32,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:32,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:32,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=7, number of hosts=4, number of racks=1 2024-11-11T04:23:32,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=22400 2024-11-11T04:23:32,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 34 ms to try 22400 different iterations. Found a solution that moves 5 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.008665511265164644. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.7142857142857143); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:32,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1438800520=2, srv1235817753=1, srv1015281159=0, srv578133253=3, srv783796461=4} racks are {rack=0} 2024-11-11T04:23:32,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:32,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:32,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:32,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:32,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:32,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:32,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:32,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:32,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:32,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:32,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=5, number of racks=1 2024-11-11T04:23:32,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:32,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.22705408170595567 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.26202041028867284); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-11T04:23:32,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1674100324=1, srv12365657=0, srv795875718=5, srv727017236=4, srv549237817=3, srv208038240=2} racks are {rack=0} 2024-11-11T04:23:32,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:32,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:32,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:32,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:32,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:32,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:32,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:32,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:32,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:32,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:32,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:32,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:32,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1500, number of hosts=6, number of racks=1 2024-11-11T04:23:32,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4440183710462696); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:32,464 WARN [Time-limited test {}] balancer.StochasticLoadBalancer(548): calculatedMaxSteps:14448000 for loadbalancer's stochastic walk is larger than maxSteps:1000000. Hence load balancing may not work well. 
Setting parameter "hbase.master.balancer.stochastic.runMaxSteps" to true can overcome this issue.(This config change does not require service restart) 2024-11-11T04:23:32,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.3847646196241505, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4440183710462696); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=1000000 2024-11-11T04:23:44,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 12322 ms to try 1000000 different iterations. Found a solution that moves 1018 regions; Going from a computed imbalance of 0.3847646196241505 to a new imbalance of 0.004103018822296562. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.3382059800664452); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:44,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
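The WARN above fires because calculatedMaxSteps (14448000) exceeds the 1000000-step cap, so the stochastic walk stops early even though it still takes about 12.3 s here; the message names hbase.master.balancer.stochastic.runMaxSteps as the override and states that no restart is required. A hedged sketch of the capping arithmetic and that override; everything except the two configuration keys quoted in the log is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RunMaxStepsSketch {
    public static void main(String[] args) {
        // Figures taken from the WARN entry above.
        long calculatedMaxSteps = 14_448_000L;
        long maxSteps = 1_000_000L;
        long computedMaxSteps = Math.min(calculatedMaxSteps, maxSteps);
        System.out.println("computedMaxSteps=" + computedMaxSteps); // 1000000, as logged

        // The override the WARN suggests; the log states it takes effect without a restart.
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", true);
    }
}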
2024-11-11T04:23:44,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1197507073=1, srv1610559626=2, srv257609173=3, srv394603191=4, srv1005642303=0, srv938795385=5} racks are {rack=0} 2024-11-11T04:23:44,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:44,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:44,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:44,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:44,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:44,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:44,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:44,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:44,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:44,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:44,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:44,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:44,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1500, number of hosts=6, number of racks=1 2024-11-11T04:23:44,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.3438084096789955); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:44,828 WARN [Time-limited test {}] balancer.StochasticLoadBalancer(548): calculatedMaxSteps:16800000 for loadbalancer's stochastic walk is larger than maxSteps:1000000. Hence load balancing may not work well. 
Setting parameter "hbase.master.balancer.stochastic.runMaxSteps" to true can overcome this issue.(This config change does not require service restart) 2024-11-11T04:23:44,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.2979275647131677, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.3438084096789955); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=1000000 2024-11-11T04:23:58,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 13481 ms to try 1000000 different iterations. Found a solution that moves 924 regions; Going from a computed imbalance of 0.2979275647131677 to a new imbalance of 0.003202772963604853. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.264); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:58,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-11T04:23:58,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv295122316=9, srv1751180691=5, srv1482501616=2, srv1521980180=3, srv1729705103=4, srv228898409=8, srv38735969=11, srv649457663=13, srv85486758=14, srv1834376882=7, srv305463780=10, srv50586107=12, srv178529588=6, srv1386667909=0, srv1461715723=1} racks are {rack=0} 2024-11-11T04:23:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:23:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:23:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:23:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:23:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:23:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:23:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:23:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:23:58,310 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:23:58,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:23:58,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=15, number of hosts=15, number of racks=1 2024-11-11T04:23:58,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:58,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.12507588154021357 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.14433756729740646); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:58,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1316715483=2, srv1280479584=1, srv468892292=8, srv1555748652=4, srv752250978=9, srv183493474=5, srv1440622089=3, srv2011923065=7, srv1991139544=6, srv1237381638=0} racks are {rack=0} 2024-11-11T04:23:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:58,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:58,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:58,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:58,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:58,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on 
rack 0 2024-11-11T04:23:58,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:58,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:58,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:58,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:58,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:58,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=10, number of hosts=10, number of racks=1 2024-11-11T04:23:58,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:58,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=80000 2024-11-11T04:23:58,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 115 ms to try 80000 different iterations. Found a solution that moves 9 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.010918544194107453. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.9); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:58,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
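In the runs in this section, the MoveCostFunction imbalance reported for a finished plan matches the fraction of regions the plan moves: 9 moved regions against the 10 inferred for the entry above gives 0.9, 1018 of 3010 in the 04:23:44 run gives 0.3382..., and 924 of 3500 in the 04:23:58 run gives 0.264, with the region totals inferred from computedMaxSteps as in the earlier sketch. A small check of that observation; it is an empirical match for these runs, not a claim about the general MoveCostFunction formula.

public class MoveCostCheck {
    // Observation from the runs above: the post-plan MoveCostFunction imbalance
    // equals movedRegions / totalRegions, with totalRegions inferred from
    // computedMaxSteps / (800 * servers).
    static double moveImbalance(long moved, long total) {
        return (double) moved / (double) total;
    }

    public static void main(String[] args) {
        System.out.println(moveImbalance(9, 10));      // 0.9, entry above (80000 steps, 10 servers)
        System.out.println(moveImbalance(1018, 3010)); // ~0.33820598, the 04:23:44 run
        System.out.println(moveImbalance(924, 3500));  // 0.264, the 04:23:58 run
    }
}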
2024-11-11T04:23:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1598477475=2, srv567003005=7, srv753983506=8, srv1508229296=1, srv259560854=5, srv1386586319=0, srv236314274=4, srv1793189964=3, srv519605308=6, srv977306263=9} racks are {rack=0} 2024-11-11T04:23:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:58,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:58,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:58,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:58,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=10, number of racks=1 2024-11-11T04:23:58,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.055531997651093117 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.06408392528936147); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:58,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:23:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1972110783=5, srv137733479=1, srv1895534205=4, srv198257686=6, srv1492400892=2, srv565829526=8, srv2037718778=7, srv1189844906=0, srv1578551731=3, srv960650448=9} racks are {rack=0} 2024-11-11T04:23:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:58,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:58,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=54, number of hosts=10, number of racks=1 2024-11-11T04:23:58,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle 
server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999999); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:58,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164644, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999999); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=432000 2024-11-11T04:23:59,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 813 ms to try 432000 different iterations. Found a solution that moves 48 regions; Going from a computed imbalance of 0.8665511265164644 to a new imbalance of 0.01078374735220489. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.8888888888888888); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:59,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
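Editor's note: the computedMaxSteps values in these runs (80000, 432000, 440000, 448000, ...) appear to follow stepsPerRegion × regions × servers with the usual stepsPerRegion default of 800. The region counts below are inferred by dividing the logged computedMaxSteps back out, and the cap by a maxSteps limit is an assumption; treat this as a back-of-the-envelope check, not the balancer's documented formula.

```java
// Back-of-the-envelope check of the computedMaxSteps figures in the log,
// assuming computedMaxSteps = stepsPerRegion * numRegions * numServers, capped
// by a maxSteps limit. stepsPerRegion=800 and maxSteps=1_000_000 are assumed
// defaults for hbase.master.balancer.stochastic.stepsPerRegion / maxSteps.
public class MaxStepsSketch {
    static long computedMaxSteps(int numRegions, int numServers,
                                 int stepsPerRegion, long maxSteps) {
        long calculated = (long) numRegions * stepsPerRegion * numServers;
        return Math.min(maxSteps, calculated);
    }

    public static void main(String[] args) {
        int stepsPerRegion = 800;
        long maxSteps = 1_000_000L;
        // Inferred region counts:
        // 10 regions on 10 servers -> 80000   (first run above)
        // 54 regions on 10 servers -> 432000
        // 55 regions on 10 servers -> 440000
        // 56 regions on 10 servers -> 448000
        int[][] clusters = {{10, 10}, {54, 10}, {55, 10}, {56, 10}};
        for (int[] c : clusters) {
            System.out.println(computedMaxSteps(c[0], c[1], stepsPerRegion, maxSteps));
        }
    }
}
```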
2024-11-11T04:23:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1621417116=3, srv198119122=4, srv1012750134=0, srv750495886=9, srv2067138795=5, srv687924969=8, srv1233170792=2, srv387404183=6, srv1162011713=1, srv674559762=7} racks are {rack=0} 2024-11-11T04:23:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:23:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:23:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:23:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:23:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:23:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:23:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:23:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:23:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:23:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:23:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:23:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:23:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:23:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:23:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:23:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:23:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:23:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:23:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:23:59,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:23:59,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=55, number of hosts=10, number of racks=1 2024-11-11T04:23:59,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:23:59,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=440000 2024-11-11T04:24:00,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 859 ms to try 440000 different iterations. Found a solution that moves 49 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.010808255868914448. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.8909090909090909); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:00,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
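Editor's note: in every run of this log, the MoveCostFunction imbalance reported after a plan equals movedRegions / totalRegions (9/10 = 0.9, 48/54 ≈ 0.8889, 49/55 ≈ 0.8909, 50/56 ≈ 0.8929). The snippet below just restates that observation with the region totals inferred from computedMaxSteps; it is not the documented MoveCostFunction formula.

```java
// Observation about these runs only: the post-plan MoveCostFunction imbalance
// matches movedRegions / totalRegions. Totals are inferred from computedMaxSteps.
public class MoveCostSketch {
    static double moveImbalance(int movedRegions, int totalRegions) {
        return (double) movedRegions / totalRegions;
    }

    public static void main(String[] args) {
        System.out.println(moveImbalance(9, 10));  // 0.9               (first run)
        System.out.println(moveImbalance(48, 54)); // 0.8888888888888888
        System.out.println(moveImbalance(49, 55)); // 0.8909090909090909
        System.out.println(moveImbalance(50, 56)); // 0.8928571428571429
    }
}
```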
2024-11-11T04:24:00,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1970915077=6, srv581505182=9, srv1557536733=4, srv268749591=8, srv1391085007=2, srv1575117578=5, srv1976805693=7, srv1404412669=3, srv1338394201=1, srv1018785199=0} racks are {rack=0} 2024-11-11T04:24:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:24:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:24:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:24:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:24:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:24:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:24:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:24:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:24:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:24:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:24:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:24:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:24:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:24:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:24:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:24:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:24:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:24:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:24:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:24:00,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:24:00,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=56, number of hosts=10, number of racks=1 2024-11-11T04:24:00,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:00,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=448000 2024-11-11T04:24:00,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 841 ms to try 448000 different iterations. Found a solution that moves 50 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.010831889081455806. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.8928571428571429); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:00,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-11T04:24:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv655271781=9, srv1551431795=4, srv1895461290=6, srv1229297294=2, srv251593887=7, srv1527808953=3, srv1125861416=0, srv1673645424=5, srv1195464071=1, srv511871870=8} racks are {rack=0} 2024-11-11T04:24:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:24:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:24:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:24:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:24:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:24:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:24:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:24:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:24:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:24:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:24:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:24:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:24:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:24:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:24:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:24:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:24:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:24:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:24:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:24:00,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:24:00,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=16, number of hosts=10, number of racks=1 2024-11-11T04:24:00,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:00,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=128000 2024-11-11T04:24:01,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 204 ms to try 128000 different iterations. Found a solution that moves 14 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.01061525129982669. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.875); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:01,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
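Editor's note: the "try N different iterations" figures refer to a randomized local search: propose a candidate region move, recompute the weighted cost, and keep the move only if the cost does not get worse. The toy below is a generic hill-climbing sketch of that idea under a made-up cost function; HBase's candidate generators and incremental cost bookkeeping are considerably more involved, so read this as an illustration of the iteration counts, not the implementation.

```java
import java.util.Random;

// Generic hill-climbing sketch of what the "try N different iterations" figures
// above refer to. The cost function here is a toy stand-in for the weighted
// cost-function sum in the real balancer.
public class StochasticSearchSketch {
    public static void main(String[] args) {
        Random rnd = new Random(42);
        int servers = 10, regions = 56;
        int[] assignment = new int[regions];      // region -> server, all start on server 0
        long maxSteps = 448_000L;                 // computedMaxSteps from the log

        double best = cost(assignment, servers);
        for (long step = 0; step < maxSteps; step++) {
            int region = rnd.nextInt(regions);
            int from = assignment[region];
            int to = rnd.nextInt(servers);
            assignment[region] = to;              // propose a move
            double candidate = cost(assignment, servers);
            if (candidate <= best) {
                best = candidate;                 // keep the improvement
            } else {
                assignment[region] = from;        // revert
            }
        }
        System.out.println("final toy cost = " + best);
    }

    // Toy cost: total deviation of per-server region counts from the mean.
    static double cost(int[] assignment, int servers) {
        int[] counts = new int[servers];
        for (int s : assignment) counts[s]++;
        double mean = (double) assignment.length / servers;
        double sum = 0;
        for (int c : counts) sum += Math.abs(c - mean);
        return sum;
    }
}
```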
2024-11-11T04:24:01,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1231404834=1, srv2048783529=6, srv781011420=9, srv1389944333=3, srv2100687481=7, srv136097322=2, srv2107604290=8, srv1652030095=5, srv1228150689=0, srv1499289757=4} racks are {rack=0} 2024-11-11T04:24:01,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=8, number of hosts=10, number of racks=1 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.30649131741006164 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.35369098029121115); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1778502138=2, srv774318027=8, srv235011688=5, srv1690069134=1, srv2062683099=3, srv225775471=4, srv117502396=0, srv53423330=6, srv57908477=7, srv874202847=9} racks are {rack=0} 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=9, number of hosts=10, number of racks=1 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
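Editor's note: the log shows two triggers for a balancer run. An idle (region-free) server forces a run ("Running balancer because cluster has idle server(s)"); otherwise the weighted imbalance must exceed the configured threshold, here hbase.master.balancer.stochastic.minCostNeedBalance = 1.0 ("skipping load balancing because weighted average imbalance ... <= threshold(1.0)"). A minimal sketch of that decision, with illustrative names:

```java
// Sketch of the skip decision seen in the log above. Class and method names are
// illustrative; only the property name and threshold value come from the log.
public class NeedsBalanceSketch {
    static boolean needsBalance(double weightedImbalance, double minCostNeedBalance,
                                boolean hasIdleServer) {
        // An idle server forces a run; otherwise the weighted imbalance must
        // exceed hbase.master.balancer.stochastic.minCostNeedBalance.
        return hasIdleServer || weightedImbalance > minCostNeedBalance;
    }

    public static void main(String[] args) {
        double threshold = 1.0; // reported as threshold(1.0) in this test
        System.out.println(needsBalance(0.34662045060658575, threshold, false)); // false -> skipped
        System.out.println(needsBalance(0.8665511265164645, threshold, true));   // true  -> idle server
    }
}
```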
2024-11-11T04:24:01,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.34662045060658575 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.39999999999999997); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:24:01,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1743899536=3, srv2061586024=5, srv2083104099=6, srv1444281212=1, srv83145935=8, srv173874585=2, srv959118909=9, srv127036534=0, srv469777332=7, srv1918865391=4} racks are {rack=0} 2024-11-11T04:24:01,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:24:01,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:24:01,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:24:01,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:24:01,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:24:01,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:24:01,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:24:01,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:24:01,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:24:01,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:24:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:24:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:24:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:24:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:24:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:24:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:24:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:24:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:24:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 
2024-11-11T04:24:01,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:24:01,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=10, number of hosts=10, number of racks=1 2024-11-11T04:24:01,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:24:01,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.3851338340073176 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.44444444444444453); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:01,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:24:01,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1375538406=2, srv1491734476=3, srv2043961119=6, srv1656758698=4, srv1364844411=1, srv744770335=8, srv889978354=9, srv1928454304=5, srv1031896453=0, srv67328628=7} racks are {rack=0} 2024-11-11T04:24:01,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:24:01,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:24:01,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:24:01,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:24:01,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:24:01,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:24:01,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:24:01,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:24:01,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:24:01,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:24:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:24:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:24:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:24:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:24:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:24:01,153 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:24:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:24:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:24:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:24:01,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:24:01,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=123, number of hosts=10, number of racks=1 2024-11-11T04:24:01,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:24:01,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.8002334382626535 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.923469387755102); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:01,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
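Editor's note: the repeated BalancerClusterState dumps ("Hosts are {...} racks are {rack=0}", "server N is on host N", "server N is on rack 0") are dense index assignments: each distinct server name gets a host index and each distinct rack name gets a rack index. With one server per host and a single rack, as in these tests, server, host, and rack indices line up exactly as logged. A rough sketch of that bookkeeping, with illustrative names and structure (not the BalancerClusterState API):

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative bookkeeping behind the cluster-state dumps above: assign each
// distinct server name a dense host index and each distinct rack a rack index.
public class ClusterIndexSketch {
    public static void main(String[] args) {
        String[] servers = {"srv575306897", "srv806287869", "srv1697706586"};
        String singleRack = "rack"; // single-rack test cluster, as in the log

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();
        for (String server : servers) {
            // One server per host in these tests, so host index == server index.
            hostIndex.computeIfAbsent(server, s -> hostIndex.size());
            rackIndex.computeIfAbsent(singleRack, r -> rackIndex.size());
            System.out.println("server " + hostIndex.get(server)
                + " is on host " + hostIndex.get(server)
                + " and rack " + rackIndex.get(singleRack));
        }
    }
}
```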
2024-11-11T04:24:01,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv575306897=3, srv806287869=7, srv1697706586=0, srv639017002=4, srv78772193=6, srv1922534779=1, srv880050734=8, srv662797485=5, srv9456860=9, srv541273524=2} racks are {rack=0} 2024-11-11T04:24:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:24:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:24:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:24:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:24:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:24:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:24:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:24:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:24:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:24:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:24:01,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:24:01,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:24:01,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:24:01,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:24:01,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:24:01,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:24:01,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:24:01,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:24:01,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:24:01,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:24:01,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=155, number of hosts=10, number of racks=1 2024-11-11T04:24:01,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:24:01,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.8131812243798632 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9384111329343621); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:01,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:24:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1365723141=2, srv830836496=6, srv1449193023=3, srv1047458880=0, srv1204421605=1, srv894205266=7, srv385676257=5, srv1488072983=4} racks are {rack=0} 2024-11-11T04:24:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:24:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:24:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:24:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:24:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:24:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:24:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:24:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:24:01,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:24:01,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:24:01,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:24:01,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:24:01,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:24:01,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:24:01,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:24:01,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:24:01,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=14, number of hosts=8, number of racks=1 2024-11-11T04:24:01,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:24:01,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.05755254949858986 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0664156421213727); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:01,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:24:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv569232963=7, srv1031461687=0, srv386218468=6, srv1839651469=2, srv2128566331=4, srv1539397645=1, srv355283413=5, srv2072177801=3} racks are {rack=0} 2024-11-11T04:24:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:24:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:24:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:24:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:24:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:24:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:24:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:24:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:24:01,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:24:01,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:24:01,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:24:01,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:24:01,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:24:01,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:24:01,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:24:01,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:24:01,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=14, number of hosts=8, number of racks=1 2024-11-11T04:24:01,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:24:01,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.06673965003400768 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.07701755613924488); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:01,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:24:01,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1803872351=3, srv1639535439=2, srv1628776968=1, srv291807733=5, srv1436261025=0, srv544447471=6, srv861508692=7, srv191710663=4} racks are {rack=0} 2024-11-11T04:24:01,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:24:01,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:24:01,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:24:01,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:24:01,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:24:01,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:24:01,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:24:01,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:24:01,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:24:01,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:24:01,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:24:01,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:24:01,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:24:01,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:24:01,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:24:01,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:24:01,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=130, number of hosts=8, number of racks=1 2024-11-11T04:24:01,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:24:01,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.28093705674099306 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.32420136347910594); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:01,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:24:01,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1272681015=2, srv1736166640=4, srv1046828829=1, srv1002673590=0, srv1864477668=5, srv474411805=7, srv1319047410=3, srv1929135095=6} racks are {rack=0} 2024-11-11T04:24:01,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:24:01,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:24:01,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:24:01,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:24:01,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:24:01,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:24:01,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:24:01,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:24:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:24:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:24:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:24:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:24:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:24:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:24:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:24:01,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:24:01,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=140, number of hosts=8, number of racks=1 2024-11-11T04:24:01,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-11T04:24:01,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.07533492111851356 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.08693649897076465); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:01,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-11T04:24:01,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2044255287=2, srv171098075=0, srv757276415=4, srv1908494855=1, srv475468373=3} racks are {rack=0} 2024-11-11T04:24:01,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:24:01,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:24:01,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:24:01,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:24:01,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:24:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:24:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:24:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:24:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:24:01,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:24:01,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=5, number of hosts=5, number of racks=1 2024-11-11T04:24:01,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.25); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:01,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.21663778162911612, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.25); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=80000 2024-11-11T04:24:01,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 113 ms to try 80000 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.21663778162911612 to a new imbalance of 0.0024263431542461008. functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.2); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:01,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster.
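The balancer run above goes from an initial weighted average imbalance of 0.21663778162911612 to 0.0024263431542461008 after moving 4 regions in 113 ms. Both figures are consistent with reading the overall imbalance as the multiplier-weighted mean of the per-function imbalances over the cost functions not marked "(not needed)", i.e. sum(multiplier_i * imbalance_i) / sum(multiplier_i) with a multiplier sum of 577. The sketch below reproduces that arithmetic; it is an inference from the logged numbers, not code taken from the HBase tree.

public class WeightedImbalanceSketch {
  // Multipliers of the cost functions reported as active in the entry above:
  // RegionCountSkew=500, Move=7, RackLocality=15, TableSkew=35,
  // ReadRequest=5, WriteRequest=5, MemStoreSize=5, StoreFile=5 (sum=577).
  static final double[] MULTIPLIERS = {500, 7, 15, 35, 5, 5, 5, 5};

  static double weighted(double[] imbalances) {
    double num = 0, den = 0;
    for (int i = 0; i < MULTIPLIERS.length; i++) {
      num += MULTIPLIERS[i] * imbalances[i];
      den += MULTIPLIERS[i];
    }
    return num / den;
  }

  public static void main(String[] args) {
    // Before the run: only RegionCountSkewCostFunction is imbalanced (0.25).
    double[] before = {0.25, 0, 0, 0, 0, 0, 0, 0};
    // After the run: only MoveCostFunction is non-zero (0.2), the price of the 4 moves.
    double[] after = {0, 0.2, 0, 0, 0, 0, 0, 0};

    // ~0.21663778162911612 and ~0.0024263431542461008, matching the logged values.
    System.out.println(weighted(before));
    System.out.println(weighted(after));
  }
}

On this reading, the 500x weight on RegionCountSkewCostFunction is what lets a region-count skew of 0.25 dominate the initial total, while the 7x weight on MoveCostFunction keeps the residual cost of the 4 moves small.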
2024-11-11T04:24:01,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv912156849=374, srv130100733=59, srv1428955631=86, srv916562565=376, srv1892309271=193, srv1542555366=121, srv1330580966=66, srv376773038=274, srv666754447=326, srv1956485665=200, srv1413083017=81, srv920587127=378, srv1503526556=108, srv1924048303=196, srv585993694=311, srv1118189697=17, srv1724637513=168, srv1725677764=169, srv1673193821=158, srv7620457=347, srv1813301074=183, srv239028326=243, srv1547292142=122, srv528259833=300, srv1642335817=149, srv2018204386=214, srv839931129=358, srv1970840443=204, srv877341622=366, srv739439825=342, srv1585176098=134, srv1471668995=101, srv165525469=150, srv611524272=318, srv262296231=249, srv676432671=330, srv961629831=389, srv1511808074=112, srv1433330688=87, srv72033309=335, srv1287006481=55, srv1608145603=138, srv1926900091=197, srv1186316073=34, srv1215520136=41, srv602705369=315, srv1611071000=140, srv78614802=350, srv1554713775=124, srv866337413=363, srv220723680=236, srv681149367=331, srv786612599=351, srv660920545=325, srv1237484866=47, srv2013733588=213, srv843135733=359, srv1351933945=71, srv1689367507=162, srv1737509889=172, srv956058623=387, srv981404742=392, srv247875434=247, srv1640529796=146, srv394797548=276, srv1664830812=155, srv1038261738=6, srv385720551=275, srv1456524608=97, srv301391594=260, srv1922060329=195, srv1426323595=85, srv190323609=194, srv1145009914=27, srv1024829541=5, srv838936483=357, srv1609472725=139, srv1585556101=135, srv653915445=323, srv513146428=296, srv446974404=283, srv965184179=390, srv1205811394=37, srv1301878105=60, srv283410568=258, srv511913628=295, srv864325155=361, srv2055288832=222, srv1528522472=118, srv1960772286=201, srv745939114=343, srv1789967770=180, srv1516380637=115, srv1985027838=207, srv1356266579=73, srv1565176781=128, srv757878426=346, srv492289875=290, srv1344614239=69, srv20450715=220, srv326356827=267, srv1497773099=107, srv3764360=273, srv1716829776=166, srv2121324119=231, srv415193566=280, srv1266979100=51, srv1115174999=15, srv1447600806=94, srv1774819550=179, srv948611208=384, srv444765411=282, srv109388894=10, srv1641384949=148, srv1273657807=52, srv320871498=266, srv1513641201=114, srv101657126=4, srv1005641220=1, srv1139010850=22, srv1576776459=132, srv736238999=340, srv1220921145=43, srv1673511357=159, srv209296264=226, srv923057881=379, srv173845708=173, srv1512590581=113, srv246957836=246, srv223866423=238, srv1135724722=20, srv1728664449=170, srv1741814766=174, srv610297782=317, srv582320812=310, srv1487780360=105, srv1624858586=143, srv1631519021=144, srv1482333861=103, srv636636277=319, srv1145102505=28, srv1448401873=95, srv1549458928=123, srv605835840=316, srv829656122=355, srv1666072899=156, srv186360204=190, srv877284565=365, srv2044562702=219, srv1483008721=104, srv1139263995=23, srv1158634728=32, srv793386890=352, srv738243487=341, srv1435629081=90, srv1004790049=0, srv655689141=324, srv1261336951=49, srv1978311835=206, srv1674339775=160, srv1510315451=110, srv591642856=312, srv864843095=362, srv2003655927=211, srv1145391973=29, srv1663936418=153, srv1965918319=202, srv485447140=288, srv1988732187=209, srv1682337875=161, srv375248089=272, srv903855712=372, srv224275947=239, srv1403567152=80, srv1713597622=165, srv1862007575=189, srv236594348=242, srv1732934510=171, srv1042858609=7, srv643276107=321, srv331486958=268, srv1306262123=62, srv733034074=339, srv1286011360=54, srv202315556=215, srv1141474421=24, srv57152271=307, srv2120300387=229, srv495633385=291, 
srv1769465371=178, srv429878252=281, srv1993790153=210, srv547564451=303, srv598508829=313, srv1292297550=57, srv1557322836=126, srv501560915=293, srv468064786=285, srv269120867=253, srv471645590=286, srv1094537142=11, srv902151684=371, srv161789867=141, srv545528568=302, srv1211977148=40, srv674439882=328, srv339185176=271, srv1588439261=136, srv484669797=287, srv220819466=237, srv216438694=233, srv1274254109=53, srv1309461231=64, srv795566135=353, srv1203349446=36, srv487673772=289, srv511681402=294, srv399408053=277, srv201274937=212, srv174982599=175, srv715372597=334, srv1336178508=68, srv1144991044=26, srv243520554=245, srv518617050=299, srv218975234=234, srv1465500507=98, srv670738148=327, srv1511800924=111, srv764560888=348, srv24114089=244, srv1722152543=167, srv2101612625=227, srv1507781304=109, srv646928839=322, srv1946020398=198, srv600414586=314, srv951578650=386, srv280458286=257, srv303215797=262, srv188030914=192, srv1953006845=199, srv571716468=308, srv1013925795=3, srv143368215=88, srv1556280272=125, srv2034506395=217, srv1211454935=39, srv1535897010=119, srv1845702229=185, srv227039027=240, srv1521849112=116, srv1417993205=83, srv1451169498=96, srv1147422372=30, srv411492770=278, srv707483192=332, srv874066340=364, srv1138186398=21, srv1357712679=74, srv1701378139=164, srv1173130293=33, srv1416576607=82, srv513620976=297, srv1353384216=72, srv1006299537=2, srv49803658=292, srv1528150997=117, srv835162126=356, srv1967267960=203, srv1058683468=8, srv2120606238=230, srv550535530=304, srv784338564=349, srv1230098730=45, srv154044144=120, srv1792110837=181, srv573284745=309, srv1109149907=14, srv1346016869=70, srv1116965307=16, srv1298863744=58, srv674690006=329, srv732514675=338, srv1260897925=48, srv749478409=345, srv251358601=248, srv1358593381=75, srv1447199268=93, srv1223449696=44, srv265761331=250, srv1103816371=13, srv726960483=337, srv205368962=221, srv1577923575=133, srv312577891=264, srv1186657517=35, srv1470932235=100, srv1661709973=152, srv266036365=251, srv1142081582=25, srv1467338550=99, srv708635853=333, srv143829088=91, srv2036684850=218, srv212356977=232, srv278314781=256, srv1308036413=63, srv2069111806=224, srv312096658=263, srv1664390412=154, srv1080747047=9, srv1657648771=151, srv415166282=279, srv536697252=301, srv1641172379=147, srv1372464684=76, srv456496444=284, srv1100874440=12, srv639739798=320, srv26888993=252, srv894205137=370, srv11287007=18, srv1209077305=38, srv917353743=377, srv1566189528=129, srv1854013100=188, srv1619721706=142, srv1335776694=67, srv1876077878=191, srv906601783=373, srv928793711=382, srv1220026605=42, srv1847988027=186, srv1841065944=184, srv2092484491=225, srv1266973599=50, srv1808615091=182, srv886217492=369, srv855403639=360, srv1439034844=92, srv880929041=367, srv2024843922=216, srv175513410=176, srv881209276=368, srv335372531=269, srv562942576=306, srv2115224480=228, srv1600567264=137, srv1568541658=131, srv1387870541=79, srv143437565=89, srv1479714918=102, srv293129700=259, srv1759328833=177, srv725648047=336, srv336384748=270, srv913344630=375, srv23567981=241, srv1491578576=106, srv301523838=261, srv142047077=84, srv1563283249=127, srv799562448=354, srv1288737100=56, srv219025654=235, srv1384926397=78, srv274397861=255, srv928119431=380, srv1987944932=208, srv1153464574=31, srv1328766876=65, srv130313569=61, srv1852162069=187, srv1378451556=77, srv958098409=388, srv1129680613=19, srv928429064=381, srv1568364789=130, srv977232052=391, srv1668040450=157, srv271215734=254, srv1636944101=145, srv949381643=385, 
srv749447766=344, srv516929532=298, srv2066142306=223, srv1694900317=163, srv31671951=265, srv945171220=383, srv552642659=305, srv1231952756=46, srv1975873662=205} racks are {rack=0} 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 
2024-11-11T04:24:01,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-11T04:24:01,299 DEBUG [Time-limited test 
{}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 
is on host 92 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is 
on host 123 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-11T04:24:01,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 154 is on host 154 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-11T04:24:01,300 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-11T04:24:01,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 
2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is 
on host 277 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 308 is on host 308 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-11T04:24:01,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-11T04:24:01,302 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-11T04:24:01,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-11T04:24:01,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-11T04:24:01,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 
2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 
2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-11T04:24:01,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 
2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-11T04:24:01,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 
2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 
2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-11T04:24:01,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-11T04:24:01,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=56, number of hosts=393, number of racks=1 2024-11-11T04:24:01,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999991); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:01,309 WARN [Time-limited test {}] balancer.StochasticLoadBalancer(548): calculatedMaxSteps:17606400 for loadbalancer's stochastic walk is larger than maxSteps:1000000. Hence load balancing may not work well. Setting parameter "hbase.master.balancer.stochastic.runMaxSteps" to true can overcome this issue.(This config change does not require service restart) 2024-11-11T04:24:01,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164637, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999991); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=1000000 2024-11-11T04:24:06,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 4940 ms to try 1000000 different iterations. Found a solution that moves 55 regions; Going from a computed imbalance of 0.8665511265164637 to a new imbalance of 0.011915077989601387. 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.9821428571428571); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-11T04:24:06,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-11T04:24:06,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 577.0 etc. 2024-11-11T04:24:06,270 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: master.balancer.TestBalancerDecision#testBalancerDecisions Thread=13 (was 12) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=286 (was 286), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=160 (was 182), ProcessCount=11 (was 11), AvailableMemoryMB=4970 (was 5651)
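The figures in the balancer messages above are internally consistent and can be re-derived from the values the log itself reports. The short, self-contained Java sketch below reproduces that arithmetic; it is not HBase code, and both formulas are assumptions inferred from the logged numbers (56 regions across the 56 tables, 393 servers, stepsPerRegion=800, a cost-multiplier sum of 577.0): calculatedMaxSteps = regions * stepsPerRegion * servers, and weighted average imbalance = sum(multiplier * imbalance) / sum(multipliers).

public class BalancerLogArithmetic {
    public static void main(String[] args) {
        // Values reported by the log: "Number of tables=56, number of hosts=393, number of racks=1",
        // "maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800", "sum of multiplier of cost functions = 577.0".
        long regions = 56;          // assumption: one region per table in this test (55 of them end up moving)
        long servers = 393;
        long stepsPerRegion = 800;
        long maxSteps = 1_000_000;

        // Assumed formula, inferred from the logged value 17606400: regions * stepsPerRegion * servers.
        long calculatedMaxSteps = regions * stepsPerRegion * servers;
        System.out.println("calculatedMaxSteps = " + calculatedMaxSteps);                     // 17606400 > maxSteps, hence the WARN
        System.out.println("computedMaxSteps   = " + Math.min(maxSteps, calculatedMaxSteps)); // capped at 1000000

        // Assumed formula for the "weighted average imbalance": sum(multiplier * imbalance) / sum(multipliers).
        double sumOfMultipliers = 577.0;
        double initialImbalance = (500.0 * 1.0) / sumOfMultipliers;       // only RegionCountSkew (multiplier 500) is non-zero initially
        double finalImbalance = (7.0 * (55.0 / 56.0)) / sumOfMultipliers; // only MoveCost (multiplier 7) remains; 55 of 56 regions moved
        System.out.printf("initial imbalance ~ %.16f%n", initialImbalance); // ~0.86655..., matching the logged 0.8665511265164637
        System.out.printf("final imbalance   ~ %.16f%n", finalImbalance);   // ~0.01191..., matching the logged 0.011915077989601387
    }
}

If the WARN about the stochastic walk matters outside this test, the log itself names the remedy: setting "hbase.master.balancer.stochastic.runMaxSteps" to true (typically in hbase-site.xml; per the message, no service restart is required) lets the balancer use the full calculated walk instead of the 1,000,000-step cap.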